blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea2c22d2bcc968840f2546a7797fd481f4baee63
|
ccbfc7818c0b75929a1dfae41dc061d5e0b78519
|
/aliyun-openapi-python-sdk-master/aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ModifyAccountPasswordRequest.py
|
f043c7559542a8e3d3c3580f0a4b31e7c654201e
|
[
"Apache-2.0"
] |
permissive
|
P79N6A/dysms_python
|
44b634ffb2856b81d5f79f65889bfd5232a9b546
|
f44877b35817e103eed469a637813efffa1be3e4
|
refs/heads/master
| 2020-04-28T15:25:00.368913
| 2019-03-13T07:52:34
| 2019-03-13T07:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyAccountPasswordRequest(RpcRequest):
    """RPC request wrapper for the PolarDB ``ModifyAccountPassword`` API
    (product 'polardb', version 2017-08-01).

    Each API parameter is exposed as a get_/set_ pair backed by the
    request's query-parameter dict.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'polardb', '2017-08-01', 'ModifyAccountPassword', 'polardb')

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_AccountName(self):
        return self.get_query_params().get('AccountName')

    def set_AccountName(self, AccountName):
        self.add_query_param('AccountName', AccountName)

    def get_NewAccountPassword(self):
        return self.get_query_params().get('NewAccountPassword')

    def set_NewAccountPassword(self, NewAccountPassword):
        self.add_query_param('NewAccountPassword', NewAccountPassword)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_DBClusterId(self):
        return self.get_query_params().get('DBClusterId')

    def set_DBClusterId(self, DBClusterId):
        self.add_query_param('DBClusterId', DBClusterId)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
|
[
"1478458905@qq.com"
] |
1478458905@qq.com
|
20a2640e2ad54b344e5be1bcbd8dfe4f8745ed6b
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Chess-py/gui/gui_functions.py
|
ee41fde1220a24a6a79a27e9b11f9b5729a73a9c
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:920dea71adf194f81da15c63d5ab5246c6637ed6329661630abdf4d56b12f7a6
size 9635
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
5e5ce0df1b1faf85f26ec4a9c54d6ac980b61e5a
|
542f898adea1b36d627d4bf437731022f242d2dd
|
/projects/TridentNet/tridentnet/trident_backbone.py
|
7789bd219b01d452e876ad2ad7f811502719465c
|
[
"Apache-2.0"
] |
permissive
|
facebookresearch/detectron2
|
24bf508e374a98a5e5d1bd4cc96556d5914215f4
|
80307d2d5e06f06a8a677cc2653f23a4c56402ac
|
refs/heads/main
| 2023-08-30T17:00:01.293772
| 2023-08-25T22:10:24
| 2023-08-25T22:10:24
| 206,660,580
| 27,469
| 8,047
|
Apache-2.0
| 2023-09-13T09:25:57
| 2019-09-05T21:30:20
|
Python
|
UTF-8
|
Python
| false
| false
| 7,846
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm
from detectron2.modeling import BACKBONE_REGISTRY, ResNet, ResNetBlockBase
from detectron2.modeling.backbone.resnet import BasicStem, BottleneckBlock, DeformBottleneckBlock
from .trident_conv import TridentConv
__all__ = ["TridentBottleneckBlock", "make_trident_stage", "build_trident_resnet_backbone"]
class TridentBottleneckBlock(ResNetBlockBase):
    """Bottleneck residual block whose 3x3 conv is a multi-branch TridentConv.

    Compared with a standard ResNet bottleneck (1x1 -> 3x3 -> 1x1 convs plus
    an optional projection shortcut), the 3x3 conv here runs ``num_branch``
    parallel branches with per-branch dilations.  The block consumes and
    produces a *list* of per-branch tensors (a single tensor input is
    broadcast to all branches).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        num_branch=3,
        dilations=(1, 2, 3),
        concat_output=False,
        test_branch_idx=-1,
    ):
        """
        Args:
            num_branch (int): the number of branches in TridentNet.
            dilations (tuple): the dilations of multiple branches in TridentNet.
            concat_output (bool): if concatenate outputs of multiple branches in TridentNet.
                Use 'True' for the last trident block.
            test_branch_idx (int): branch selector used at test time; -1 keeps
                all branches active (see ``forward``).
        """
        super().__init__(in_channels, out_channels, stride)

        # One dilation per branch is required.
        assert num_branch == len(dilations)

        self.num_branch = num_branch
        self.concat_output = concat_output
        self.test_branch_idx = test_branch_idx

        # 1x1 projection shortcut, needed only when the channel count changes.
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        # Place the spatial stride on either the first 1x1 or the 3x3 conv.
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)

        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        # Multi-branch 3x3 conv; paddings are set equal to the dilations.
        self.conv2 = TridentConv(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            paddings=dilations,
            bias=False,
            groups=num_groups,
            dilations=dilations,
            num_branch=num_branch,
            test_branch_idx=test_branch_idx,
            norm=get_norm(norm, bottleneck_channels),
        )

        self.conv3 = Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        # MSRA (He) initialization for every conv in the block.
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

    def forward(self, x):
        # At inference time with a fixed test branch, only one branch runs.
        num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
        # Broadcast a single input tensor to every branch.
        if not isinstance(x, list):
            x = [x] * num_branch
        out = [self.conv1(b) for b in x]
        out = [F.relu_(b) for b in out]

        out = self.conv2(out)
        out = [F.relu_(b) for b in out]

        out = [self.conv3(b) for b in out]

        # Residual connection, applied branch-wise.
        if self.shortcut is not None:
            shortcut = [self.shortcut(b) for b in x]
        else:
            shortcut = x

        out = [out_b + shortcut_b for out_b, shortcut_b in zip(out, shortcut)]
        out = [F.relu_(b) for b in out]
        if self.concat_output:
            # Last trident block: merge branch outputs into a single tensor.
            out = torch.cat(out)
        return out
def make_trident_stage(block_class, num_blocks, **kwargs):
    """
    Create a resnet stage by creating many blocks for TridentNet.
    """
    # Only the final block of the stage concatenates its branch outputs.
    kwargs["concat_output_per_block"] = [i == num_blocks - 1 for i in range(num_blocks)]
    return ResNet.make_stage(block_class, num_blocks, **kwargs)
@BACKBONE_REGISTRY.register()
def build_trident_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config for TridentNet.

    The configured trident stage gets TridentBottleneckBlock blocks; other
    stages use the regular (or deformable) bottleneck blocks.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
    )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT

    if freeze_at >= 1:
        # Freeze the stem: stop gradients and fix batch-norm statistics.
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
    branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS
    trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE
    test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX
    # fmt: on

    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    # Standard ResNet depth -> blocks-per-stage table (res2..res5).
    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]

    stages = []

    res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5}
    out_stage_idx = [res_stage_idx[f] for f in out_features]
    trident_stage_idx = res_stage_idx[trident_stage]
    max_stage_idx = max(out_stage_idx)
    # Build stages res2..resN; channel widths double at every stage.
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # res5 may trade its stride for dilation when res5_dilation == 2.
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation,
        }
        if stage_idx == trident_stage_idx:
            assert not deform_on_per_stage[
                idx
            ], "Not support deformable conv in Trident blocks yet."
            stage_kargs["block_class"] = TridentBottleneckBlock
            stage_kargs["num_branch"] = num_branch
            stage_kargs["dilations"] = branch_dilations
            stage_kargs["test_branch_idx"] = test_branch_idx
            # Trident blocks take per-branch dilations, not a single dilation.
            stage_kargs.pop("dilation")
        elif deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = BottleneckBlock
        blocks = (
            make_trident_stage(**stage_kargs)
            if stage_idx == trident_stage_idx
            else ResNet.make_stage(**stage_kargs)
        )
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2

        # Freeze whole stages up to and including FREEZE_AT.
        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
1575b08a2c652e7cdf3d3da4db1c9005fb2a2b5b
|
3da6b8a0c049a403374e787149d9523012a1f0fc
|
/Coder_Old/几个好玩有趣的Python入门实例/简单统计/main.py
|
36fcf153c6ae58736a502713a8e34905eff3b104
|
[] |
no_license
|
AndersonHJB/PyCharm_Coder
|
d65250d943e84b523f022f65ef74b13e7c5bc348
|
32f2866f68cc3a391795247d6aba69a7156e6196
|
refs/heads/master
| 2022-07-25T11:43:58.057376
| 2021-08-03T02:50:01
| 2021-08-03T02:50:01
| 348,922,058
| 3
| 3
| null | 2021-09-05T02:20:10
| 2021-03-18T02:57:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
# 输入一组数据,计算均值,方差,中位数,绝对相对误差。
# -*- coding: utf-8 -*-
# 输入数据
def getNum():
    """Read numbers from stdin, one per line, until a blank line; return them as a list."""
    values = []
    # NOTE(review): eval() on raw user input is unsafe; consider float() or
    # ast.literal_eval if this ever handles untrusted input.
    entry = input('please input a sequence of numbers (enter to exit): ')
    while entry != '':
        values.append(eval(entry))
        entry = input('please input a sequence of numbers (enter to exit): ')
    return values
# Arithmetic mean
def average(numbers):
    """Return the arithmetic mean of *numbers* (must be non-empty)."""
    total = sum(numbers)
    return total / len(numbers)
# Population standard deviation
def dev(numbers, average):
    """Return the population standard deviation of *numbers* around *average*."""
    squared_error = sum((value - average) ** 2 for value in numbers)
    return pow(squared_error / len(numbers), 0.5)
# Median
def median(numbers):
    """Return the median of *numbers*.

    Bug fix: the original called ``sorted(numbers)`` and discarded the result
    (``sorted`` returns a new list), so the median of any unsorted input was
    wrong.  The input list is left unmodified.
    """
    ordered = sorted(numbers)
    size = len(ordered)
    if size % 2 == 0:
        # Even count: mean of the two middle elements.
        return (ordered[size // 2 - 1] + ordered[size // 2]) / 2
    else:
        return ordered[size // 2]
# Absolute and relative error
def rel_dev(numbers, average):
    """Return (absolute error, relative error) of the value furthest from *average*."""
    candidates = (abs(max(numbers) - average), abs(min(numbers) - average))
    worst = max(candidates)
    return worst, worst / average
def main():
    """Entry point: read numbers from stdin and print summary statistics."""
    nums = getNum()
    if len(nums) == 0:
        print('no data')
    else:
        ave = average(nums)
        devs = rel_dev(nums, ave)
        # Bug fix: the absolute-error field used '{:4f}' (field width 4,
        # default precision) instead of '{:.4f}' like every other field.
        # NOTE(review): the label 方差 (variance) is printed for dev(), which
        # returns the standard deviation -- confirm the intended wording.
        print('和:{:.4f},平均数:{:.4f},中位数:{:.4f},方差:{:.4f},绝对误差:{:.4f},相对误差:{:.4f}'
              .format(sum(nums), ave, median(nums), dev(nums, ave), devs[0], devs[1]))


if __name__ == '__main__':
    main()
|
[
"1432803776@qq.com"
] |
1432803776@qq.com
|
7045ae2111e975c1900c7f15ec0532dbbf283c3d
|
9a076ee891aa04dd1522662838dda63ad554e835
|
/manage.py
|
6c345e52c5342bfb1e480ee19abe93787dd7e988
|
[
"MIT"
] |
permissive
|
Albert-Byrone/Pitches
|
fc018b3f46ea325456212154f27426c7d18ef435
|
d9ae032ff0a00b135d03404477e07a8405247b5e
|
refs/heads/master
| 2022-10-16T14:46:38.758963
| 2019-10-22T11:53:35
| 2019-10-22T11:53:35
| 216,051,524
| 0
| 1
|
MIT
| 2022-09-16T18:11:23
| 2019-10-18T15:12:54
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
from flask_script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app,db
from app.models import User
# Build the application with the "production" configuration and wire up the
# Flask-Script manager plus Flask-Migrate for database migrations.
app = create_app('production')
manager = Manager(app)
migrate = Migrate(app,db)

# `python manage.py db ...` -> migration commands;
# `python manage.py server` -> development server with the debugger enabled.
manager.add_command('db',MigrateCommand)
manager.add_command('server',Server(use_debugger=True))
@manager.shell
def make_shell_context():
    """Pre-populate the `manage.py shell` session with commonly used objects."""
    return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
    """Discover and run the unit tests under ./tests with verbose output."""
    import unittest
    suite = unittest.TestLoader().discover('tests')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)


if __name__ == "__main__":
    manager.run()
|
[
"albertbyrone1677@gmail.com"
] |
albertbyrone1677@gmail.com
|
ef6b4848b893f17267f2b33abef55ff4aa3231af
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part2/batch/jm/parser_errors_2/260891264.py
|
b17d02424ce9eaaf1d928b9919113f11c3a2f91b
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713
| 2020-06-09T21:15:37
| 2020-06-09T21:15:37
| 262,290,632
| 0
| 0
|
MIT
| 2020-06-09T21:15:38
| 2020-05-08T10:10:47
|
C
|
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 260891264
"""
"""
random actions, total chaos
"""
board = gamma_new(3, 2, 2, 6)
assert board is not None
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_busy_fields(board, 2) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_busy_fields(board, 1) == 5
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_busy_fields(board, 2) == 1
assert gamma_golden_move(board, 2, 1, 0) == 0
gamma_delete(board)
|
[
"jakub@molinski.dev"
] |
jakub@molinski.dev
|
1a8b3d4e9b1c958f9b0bce014b558636a21f1219
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GLES2/_errors.py
|
b6a0130446adb2d6251c43327f3ea1379148a033
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974
| 2021-01-22T10:51:14
| 2021-01-22T10:51:14
| 307,847,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from OpenGL.error import _ErrorChecker
from OpenGL.platform import PLATFORM as _p
# Install a live GLES2 error checker only when the platform provides the
# checker class; otherwise error checking is disabled.
_error_checker = _ErrorChecker(_p, _p.GLES2.glGetError) if _ErrorChecker else None
|
[
"rudnik49@gmail.com"
] |
rudnik49@gmail.com
|
22adac66fa6865c32e5f213be1abc3f4001823a7
|
0f58d1d2560d7b3a9c4567ff7431a041ebe8d1ac
|
/0x0A-python-inheritance/8-rectangle.py
|
82cd7919083de7517b18001dd57682fc827dfd43
|
[] |
no_license
|
peluza/holbertonschool-higher_level_programming
|
da2c5fc398ab7669989041f3be53a157638641c2
|
a39327938403413c178b943dbeefe02509957c9b
|
refs/heads/master
| 2022-12-14T08:12:27.444152
| 2020-09-24T21:45:49
| 2020-09-24T21:45:49
| 259,433,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
#!/usr/bin/python3
"""8-rectangle
"""
BaseGeometry = __import__('7-base_geometry').BaseGeometry
class Rectangle(BaseGeometry):
    """Rectangle geometry whose dimensions are validated by BaseGeometry."""

    def __init__(self, width, height):
        """Validate *width* and *height* via integer_validator, then store
        them privately.

        Arguments:
            width {int} -- the rectangle's width
            height {int} -- the rectangle's height
        """
        # Validate in the same order as before (width first) so the first
        # invalid argument raises the same error.
        for label, value in (("width", width), ("height", height)):
            self.integer_validator(label, value)
        self.__width = width
        self.__height = height
|
[
"edisonisaza@gmail.com"
] |
edisonisaza@gmail.com
|
e98d82802b45dee7a575478de70e3ddfcbb5feba
|
af0a20320217f1e4140346ed60a585c74f16e205
|
/20.3-html-entities.py
|
a9bc00d754636e462656b97e7a8600edf425709c
|
[] |
no_license
|
JasonOnes/py-library-reviews
|
f749a0f0c6f5ebe820792da8a2bacf00cd6964d5
|
00620cb78f44a1e6f647ae006dc9a35909db0f3c
|
refs/heads/master
| 2021-05-14T13:52:01.060933
| 2018-01-28T17:56:46
| 2018-01-28T17:56:46
| 115,957,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,846
|
py
|
"""Not quite sure how to demo this module as it is basically just four dicts"""
# html.entities
# .html5 => maps char refs to Unicode chars
# .entitydefs => maps XHTML 1.0 entity defs to replacement test in ISO Latin-1 (?)
# .name2codepoint => maps HTML enity names to Unicode code points (?)
# .codepoint2name => Unicode code points to HTML entity names
# replaces htmlentitydefs in python2
import html
def whats_the_html5_name_for_punctuation():
    """Print the HTML5 entity name for each character in a punctuation list."""
    punctuation_list = ['!', ',', '\'', '\"', '/', '.', ',', '?', '(', ')']
    symbol_dict = html.entities.html5

    # Reverse lookup: first key whose value equals the character.
    # Bug fix: the original compared with `is` (object identity), which only
    # appeared to work because CPython interns one-character strings; use
    # value equality instead.
    def get_name(v, mapping):
        return next(k for k in mapping if mapping[k] == v)

    name_template = "For {} the HTML5 name is {}."
    for item in punctuation_list:
        name = get_name(item, symbol_dict)
        print(name_template.format(item, name))
def frat_in_ISO(frat):
    """Print *frat* (space-separated entity names) next to its characters,
    resolved through the XHTML entitydefs table."""
    chars = []
    for name in frat.split(" "):
        try:
            chars.append(html.entities.entitydefs[name])
        except KeyError:
            # Unknown entity name: warn and continue with what we have.
            print("It's NOT all greek to me! \"{}\" in {} mispelled".format(name, frat))
            print("Printed what I could!")
    print(frat + " : " + "".join(chars))
def frat_in_unicode(frat):
    """Print *frat* encoded as comma-delimited Unicode code points."""
    codes = []
    for name in frat.split(" "):
        try:
            # could make it more "encoded" without comma as delimiter but that's not the point of this
            codes.append(str(html.entities.name2codepoint[name]) + ",")
        except KeyError:
            print("I don't know if that's how {} is spelled.".format(name))
    print(frat + ":" + "".join(codes))
def frat_de_unicoded(unicoded_frat):
    """Decode a comma-separated code-point string back to entity names and print it."""
    names = []
    for num in unicoded_frat.split(","):
        try:
            names.append(html.entities.codepoint2name[int(num)])
        except (KeyError, ValueError):
            # Bug fix: int() raises ValueError for non-numeric tokens -- e.g.
            # the empty token after a trailing comma, which frat_in_unicode's
            # output always has.  The original only caught KeyError, so such
            # input crashed instead of printing the warning below.
            print("Yeah, {} is a bogus number.".format(num))
    print("".join(names))
if __name__ == "__main__":
whats_the_html5_name_for_punctuation()
frat_in_ISO('kappa lambda mu')
frat_in_ISO('Sigma Delta pie')
frat_in_unicode('phi beta Rho')
frat_in_unicode('omega alpa delta')
frat_de_unicoded('966,946,929')
frat_de_unicoded('969,948,666')
|
[
"jasonr.jones14@gmail.com"
] |
jasonr.jones14@gmail.com
|
e2c8348e317ff438a9f404126243a8fb3482855e
|
521efcd158f4c69a686ed1c63dd8e4b0b68cc011
|
/airflow/providers/cncf/kubernetes/utils/xcom_sidecar.py
|
a8c0ea4c1936fc29a359f1c5cef8e36444cbd5c0
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
coutureai/RaWorkflowOrchestrator
|
33fd8e253bfea2f9a82bb122ca79e8cf9dffb003
|
cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f
|
refs/heads/main
| 2022-10-01T06:24:18.560652
| 2021-12-29T04:52:56
| 2021-12-29T04:52:56
| 184,547,783
| 5
| 12
|
Apache-2.0
| 2022-11-04T00:02:55
| 2019-05-02T08:38:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module handles all xcom functionality for the KubernetesPodOperator
by attaching a sidecar container that blocks the pod from completing until
Airflow has pulled result data into the worker for xcom serialization.
"""
import copy
from kubernetes.client import models as k8s
class PodDefaults:
    """Static defaults for Pods"""

    # Mount point shared between the main container and the sidecar.
    XCOM_MOUNT_PATH = '/airflow/xcom'
    SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
    # Sleep until interrupted, then exit 0: keeps the pod from completing
    # until Airflow has pulled the xcom data and stops the sidecar (see the
    # module docstring).
    XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 1; done;'
    VOLUME_MOUNT = k8s.V1VolumeMount(name='xcom', mount_path=XCOM_MOUNT_PATH)
    VOLUME = k8s.V1Volume(name='xcom', empty_dir=k8s.V1EmptyDirVolumeSource())
    SIDECAR_CONTAINER = k8s.V1Container(
        name=SIDECAR_CONTAINER_NAME,
        command=['sh', '-c', XCOM_CMD],
        image='alpine',
        volume_mounts=[VOLUME_MOUNT],
        # Minimal CPU request so the sidecar barely affects scheduling.
        resources=k8s.V1ResourceRequirements(
            requests={
                "cpu": "1m",
            }
        ),
    )
def add_xcom_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
    """Return a copy of *pod* with the xcom sidecar container attached.

    The xcom volume is mounted into the first (main) container and the
    sidecar is appended to the container list.  The input pod is not
    modified.
    """
    pod_cp = copy.deepcopy(pod)
    # Bug fix: the original read `pod.spec.volumes` (the *source* pod) here.
    # When the source pod already had volumes, the copy ended up sharing the
    # original list object and the insert below mutated the caller's pod.
    pod_cp.spec.volumes = pod_cp.spec.volumes or []
    pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
    pod_cp.spec.containers[0].volume_mounts = pod_cp.spec.containers[0].volume_mounts or []
    pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
    pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
    return pod_cp
|
[
"noreply@github.com"
] |
coutureai.noreply@github.com
|
b6c6890770545affae43a687df491bc4228d1c6e
|
b030e97629ce909e60065fb061110d9b0818aee1
|
/501-600/561.Array Partition I.py
|
abc497d0d0fffc3804b7017701ae8180a7ace8ab
|
[] |
no_license
|
iscas-ljc/leetcode-easy
|
40325395b0346569888ff8c065cacec243cbac98
|
e38de011ce358c839851ccac23ca7af05a9d0b32
|
refs/heads/master
| 2021-01-20T09:27:07.650656
| 2017-11-22T03:07:47
| 2017-11-22T03:07:47
| 101,595,253
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
class Solution(object):
    """LeetCode 561, Array Partition I."""

    def arrayPairSum(self, nums):
        """Sort in place, then sum every other element (the minimum of each
        consecutive pair); equivalent to ``sum(sorted(nums)[::2])``."""
        nums.sort()  # in place, as in the original
        total = 0
        for value in nums[::2]:
            total += value
        return total
|
[
"861218470@qq.com"
] |
861218470@qq.com
|
9f9440dbe47182b81654f1fac4c7832415b4ba21
|
29f7e80a31803eb196a623d0b75eb1cda47aea0d
|
/io_scene_bsp/__init__.py
|
e14db24d6b0879ce4f44de88b891b672937a0ccd
|
[
"MIT"
] |
permissive
|
Rikoshet-234/io_scene_bsp
|
3cd6eb15fd1cc1d663040567ea536ed8eb4ef956
|
68e2fa1210bebb212d1792f094634dd21b145e21
|
refs/heads/master
| 2020-04-29T05:34:02.733894
| 2019-03-14T21:25:19
| 2019-03-14T21:25:19
| 175,887,248
| 1
| 0
|
MIT
| 2019-03-15T20:32:36
| 2019-03-15T20:32:35
| null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
# Blender add-on metadata, read by Blender's add-on manager.
bl_info = {
    'name': 'Quake engine BSP format',
    'author': 'Joshua Skelton',
    'version': (1, 0, 1),
    'blender': (2, 80, 0),
    'location': 'File > Import-Export',
    'description': 'Load a Quake engine BSP file.',
    'warning': '',
    'wiki_url': '',
    'support': 'COMMUNITY',
    'category': 'Import-Export'}

# Dotted version string derived from bl_info, e.g. '1.0.1'.
__version__ = '.'.join(map(str, bl_info['version']))
# Script-reload support: if `operators` is already bound in this module's
# namespace (module-level locals() == globals()), re-import it so edits take
# effect without restarting.
# NOTE(review): nothing in this file binds `operators` at module level (the
# register/unregister functions import it locally), so this branch depends on
# the name being injected elsewhere -- confirm it ever triggers.
if 'operators' in locals():
    import importlib as il

    il.reload(operators)
    print('io_scene_bsp: reload ready')
else:
    print('io_scene_bsp: ready')
def register():
    """Register the add-on's operators with Blender."""
    # Imported lazily so merely importing this package has no side effects.
    from .operators import register
    register()


def unregister():
    """Remove the add-on's operators from Blender."""
    from .operators import unregister
    unregister()


if __name__ == '__main__':
    # Allow running the file directly (e.g. from Blender's text editor).
    from .operators import register
    register()
|
[
"joshua.skelton@gmail.com"
] |
joshua.skelton@gmail.com
|
5e9bd9c20b1c09d125229d517891f7c7ec492ce5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_ottawas.py
|
85b54ee2bfb7c07f0824dea4c14fd7d0f039fc54
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#calss header
class _OTTAWAS():
def __init__(self,):
self.name = "OTTAWAS"
self.definitions = ottawa
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['ottawa']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d9c680c635056f20fa50c26d1c4e8ca06a8cc1f6
|
09a645cdd074638ab34790680bcb1122e3f5f48d
|
/python/GafferAppleseedUI/AppleseedAttributesUI.py
|
0a599834a1e4bb18444d8c89ca8b9f934d374628
|
[
"BSD-3-Clause"
] |
permissive
|
cedriclaunay/gaffer
|
65f2940d23f7bdefca5dcef7dc79ed46745969e8
|
56eebfff39b1a93fff871e291808db38ac41dbae
|
refs/heads/master
| 2021-01-22T02:48:29.334446
| 2015-01-26T17:16:15
| 2015-01-26T17:16:15
| 28,099,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,112
|
py
|
##########################################################################
#
# Copyright (c) 2014, Esteban Tovagliari. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import string
import Gaffer
import GafferUI
import GafferAppleseed
def __visibilitySummary( plug ) :
	"""Summarise the enabled *Visibility child plugs, e.g. "Camera : On, Light : Off"."""

	entries = []
	for childName, label in (
		( "camera", "Camera" ),
		( "light", "Light" ),
		( "shadow", "Shadow" ),
		( "transparency", "Transparency" ),
		( "probe", "Probe" ),
		( "diffuse", "Diffuse" ),
		( "specular", "Specular" ),
		( "glossy", "Glossy" ),
	) :
		child = plug[childName + "Visibility"]
		if child["enabled"].getValue() :
			state = "On" if child["value"].getValue() else "Off"
			entries.append( label + " : " + state )

	return ", ".join( entries )
def __shadingSummary( plug ) :
	"""Summarise the shadingSamples plug when it is enabled, else return ""."""

	if not plug["shadingSamples"]["enabled"].getValue() :
		return ""

	return "Shading Samples %d" % plug["shadingSamples"]["value"].getValue()
def __alphaMapSummary( plug ) :
	"""Summarise the alphaMap plug when it is enabled, else return ""."""

	if not plug["alphaMap"]["enabled"].getValue() :
		return ""

	return "Alpha Map %s" % plug["alphaMap"]["value"].getValue()
# Register a sectioned UI for AppleseedAttributes.attributes: three sections
# (Visibility / Shading / Alpha Map), each collapsible header showing the
# summary produced by the callbacks defined above.
GafferUI.PlugValueWidget.registerCreator(

	GafferAppleseed.AppleseedAttributes,
	"attributes",
	GafferUI.SectionedCompoundDataPlugValueWidget,
	sections = (

		{
			"label" : "Visibility",
			"summary" : __visibilitySummary,
			"namesAndLabels" : (
				( "as:visibility:camera", "Camera" ),
				( "as:visibility:light", "Light" ),
				( "as:visibility:shadow" , "Shadow" ),
				( "as:visibility:transparency" , "Transparency" ),
				( "as:visibility:probe" , "Probe" ),
				( "as:visibility:diffuse", "Diffuse" ),
				( "as:visibility:specular", "Specular" ),
				( "as:visibility:glossy", "Glossy" ),
			),
		},

		{
			"label" : "Shading",
			"summary" : __shadingSummary,
			"namesAndLabels" : (
				( "as:shading_samples", "Shading Samples" ),
			),
		},

		{
			"label" : "Alpha Map",
			"summary" : __alphaMapSummary,
			"namesAndLabels" : (
				( "as:alpha_map", "Alpha Map" ),
			),
		},

	),

)

# Use a file browser (with appleseed bookmarks, files-only selection) for the
# alpha map value plug.
GafferUI.PlugValueWidget.registerCreator(

	GafferAppleseed.AppleseedAttributes,
	"attributes.alphaMap.value",
	lambda plug : GafferUI.PathPlugValueWidget( plug,
		path = Gaffer.FileSystemPath( "/", filter = Gaffer.FileSystemPath.createStandardFilter() ),
		pathChooserDialogueKeywords = {
			"bookmarks" : GafferUI.Bookmarks.acquire( plug, category = "appleseed" ),
			"leaf" : True,
		},
	),

)
|
[
"ramenhdr@gmail.com"
] |
ramenhdr@gmail.com
|
b99542f3a3322135a18969f2e1aa685b56bfa628
|
42c1dc42481ad4666c4ed87b42cee26d192116a5
|
/paraiso/hotel/migrations/0021_auto_20170615_1507.py
|
ac13fffdd102edad3ca303c9df8f774a4c2e0fe7
|
[] |
no_license
|
williamkblera/paraiso
|
f65ea9750fd0bc0fcc2454017a70945a10d72353
|
e786d2a2a41691b3870599c88859ca839d9299db
|
refs/heads/master
| 2021-01-23T00:35:33.542190
| 2017-12-11T20:24:26
| 2017-12-11T20:24:26
| 92,826,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-15 19:07
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django migration altering Reserva.data_reserva.

    NOTE(review): the default is a fixed timestamp captured at
    makemigrations time (2017-06-15 19:07 UTC) rather than a callable
    such as timezone.now -- presumably unintended; confirm before
    reusing this pattern.
    """
    # Must be applied after migration 0020 of the 'hotel' app.
    dependencies = [
        ('hotel', '0020_auto_20170615_1500'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reserva',
            name='data_reserva',
            field=models.DateTimeField(default=datetime.datetime(2017, 6, 15, 19, 7, 5, 164447, tzinfo=utc), verbose_name='Data da Reserva'),
        ),
    ]
|
[
"williamkblera@gmail.com"
] |
williamkblera@gmail.com
|
33088de85d1d21fb85db5ede234527249596c566
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Sklearn_arm/source/scipy/special/tests/test_bdtr.py
|
57694becc49b2028f17eac819b80a225ac010795
|
[
"MIT",
"GPL-3.0-or-later",
"BSD-3-Clause",
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"BSD-2-Clause",
"GCC-exception-3.1",
"Python-2.0",
"Qhull",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,231
|
py
|
import numpy as np
import scipy.special as sc
import pytest
from numpy.testing import assert_allclose, assert_array_equal, suppress_warnings
class TestBdtr:
def test(self):
val = sc.bdtr(0, 1, 0.5)
assert_allclose(val, 0.5)
def test_sum_is_one(self):
val = sc.bdtr([0, 1, 2], 2, 0.5)
assert_array_equal(val, [0.25, 0.75, 1.0])
def test_rounding(self):
double_val = sc.bdtr([0.1, 1.1, 2.1], 2, 0.5)
int_val = sc.bdtr([0, 1, 2], 2, 0.5)
assert_array_equal(double_val, int_val)
@pytest.mark.parametrize('k, n, p', [
(np.inf, 2, 0.5),
(1.0, np.inf, 0.5),
(1.0, 2, np.inf)
])
def test_inf(self, k, n, p):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
val = sc.bdtr(k, n, p)
assert np.isnan(val)
def test_domain(self):
val = sc.bdtr(-1.1, 1, 0.5)
assert np.isnan(val)
class TestBdtrc:
def test_value(self):
val = sc.bdtrc(0, 1, 0.5)
assert_allclose(val, 0.5)
def test_sum_is_one(self):
val = sc.bdtrc([0, 1, 2], 2, 0.5)
assert_array_equal(val, [0.75, 0.25, 0.0])
def test_rounding(self):
double_val = sc.bdtrc([0.1, 1.1, 2.1], 2, 0.5)
int_val = sc.bdtrc([0, 1, 2], 2, 0.5)
assert_array_equal(double_val, int_val)
@pytest.mark.parametrize('k, n, p', [
(np.inf, 2, 0.5),
(1.0, np.inf, 0.5),
(1.0, 2, np.inf)
])
def test_inf(self, k, n, p):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
val = sc.bdtrc(k, n, p)
assert np.isnan(val)
def test_domain(self):
val = sc.bdtrc(-1.1, 1, 0.5)
val2 = sc.bdtrc(2.1, 1, 0.5)
assert np.isnan(val2)
assert_allclose(val, 1.0)
def test_bdtr_bdtrc_sum_to_one(self):
bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
bdtrc_vals = sc.bdtrc([0, 1, 2], 2, 0.5)
vals = bdtr_vals + bdtrc_vals
assert_allclose(vals, [1.0, 1.0, 1.0])
class TestBdtri:
def test_value(self):
val = sc.bdtri(0, 1, 0.5)
assert_allclose(val, 0.5)
def test_sum_is_one(self):
val = sc.bdtri([0, 1], 2, 0.5)
actual = np.asarray([1 - 1/np.sqrt(2), 1/np.sqrt(2)])
assert_allclose(val, actual)
def test_rounding(self):
double_val = sc.bdtri([0.1, 1.1], 2, 0.5)
int_val = sc.bdtri([0, 1], 2, 0.5)
assert_allclose(double_val, int_val)
@pytest.mark.parametrize('k, n, p', [
(np.inf, 2, 0.5),
(1.0, np.inf, 0.5),
(1.0, 2, np.inf)
])
def test_inf(self, k, n, p):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
val = sc.bdtri(k, n, p)
assert np.isnan(val)
@pytest.mark.parametrize('k, n, p', [
(-1.1, 1, 0.5),
(2.1, 1, 0.5)
])
def test_domain(self, k, n, p):
val = sc.bdtri(k, n, p)
assert np.isnan(val)
def test_bdtr_bdtri_roundtrip(self):
bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
roundtrip_vals = sc.bdtri([0, 1, 2], 2, bdtr_vals)
assert_allclose(roundtrip_vals, [0.5, 0.5, np.nan])
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
418b625b9dc9be8261cdeeedf0f8fb6c7ec8adb3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/13/usersdata/104/5684/submittedfiles/flipper.py
|
525dfd44a7e141261a476cc49e226822c90d857c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
#ENTRADA (input)
# NOTE(review): comparing the results of input() to integers only works on
# Python 2, where input() evaluates the typed expression; on Python 3 these
# would be strings and p==0 would always be False -- confirm target version.
p=input('Determine a posição de p:')
r=input('Determine a posição de r:')
#PROCESSAMENTO (decision)
# Prints 'C' when p is 0; otherwise 'B' when r is 0; otherwise 'A'.
if p==0:
    print('C')
else:
    if r==0:
        print('B')
    else:
        print('A')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
0bdea361b10a4f3475f4dc9966169daced84f42c
|
0b767d1516ff77f62431f7464fb11b4e747b4a5a
|
/src/okok.py
|
c20649bc9a51ee921ebbfcdfd0c5062ea101c110
|
[
"BSD-2-Clause"
] |
permissive
|
se4ai/code
|
1429f6c2e649cad1b42323cb1cf0deded5cf23a0
|
e2ac87c48863a471459d6aabc67ebdc1c96f440e
|
refs/heads/master
| 2020-05-23T17:45:14.567820
| 2019-08-06T13:56:27
| 2019-08-06T13:56:27
| 186,873,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
from ok import ok
@ok
def ok1():
    "This will always fail."
    # 2 == 1 is false, so this assertion always trips and the framework
    # records a failure.
    assert 2==1, "oops"
@ok
def ok2():
    "This will always pass."
    # sum([1,2,3,4]) == 10, so the assertion always holds.
    n = sum([1,2,3,4])
    assert n==10, "should not fail"
if __name__ == "__main__": ok()
|
[
"tim.menzies@gmail.com"
] |
tim.menzies@gmail.com
|
4c30510bd6ce2bb79440bcadd772954fbe1cd46a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/allergies/56c0c9b37bf84ea598db6cbc74fd8ebe.py
|
1ef0988f5a8722518177c195c0c49f2735807e69
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
class Allergies(object):
    """Decode a binary allergy score into the list of allergens it encodes.

    Each allergen corresponds to one bit of the score, least-significant
    bit first (eggs = 1, peanuts = 2, shellfish = 4, ...).  Bits above
    the eighth are masked off, so scores with unknown high bits are fine.
    """

    # Index in this list == bit position in the score.
    allergies = [
        "eggs",
        "peanuts",
        "shellfish",
        "strawberries",
        "tomatoes",
        "chocolate",
        "pollen",
        "cats"
    ]

    def __init__(self, score):
        # Keep only the 8 known allergen bits.
        score = score & 0xff
        # BUGFIX: was xrange(), which exists only on Python 2 and raises
        # NameError on Python 3; range() behaves identically here on both.
        self.list = [
            self.allergies[b]
            for b in range(8)
            if score & (1 << b)
        ]

    def is_allergic_to(self, allergy):
        """Return True if *allergy* is encoded in this score."""
        return allergy in self.list
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
348d9b7b8309079a4c69ee619bc7bf6d819d36c4
|
eb3683f9127befb9ef96d8eb801206cf7b84d6a7
|
/stypy/sgmc/sgmc_cache/testing/test_programs/numpy/basic_numpy/functions/numpy_abs.py
|
eb6a5f64cfb4c351a781f911bb2dd4dd546d5b68
|
[] |
no_license
|
ComputationalReflection/stypy
|
61ec27333a12f76ac055d13f8969d3e0de172f88
|
be66ae846c82ac40ba7b48f9880d6e3990681a5b
|
refs/heads/master
| 2021-05-13T18:24:29.005894
| 2018-06-14T15:42:50
| 2018-06-14T15:42:50
| 116,855,812
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,858
|
py
|
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: # http://www.labri.fr/perso/nrougier/teaching/numpy.100/
2:
3: import numpy as np
4:
5: Z = np.arange(100)
6: v = np.random.uniform(0,100)
7: index = (np.abs(Z-v)).argmin()
8: e = Z[index]
9:
10: # l = globals().copy()
11: # for v in l:
12: # print ("'" + v + "'" + ": instance_of_class_name(\"" + type(l[v]).__name__ + "\"),")
13:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 3, 0))
# 'import numpy' statement (line 3)
update_path_to_current_file_folder('C:/Users/redon/PycharmProjects/stypyV2/testing//test_programs/numpy/basic_numpy/functions/')
import_1 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'numpy')
if (type(import_1) is not StypyTypeError):
if (import_1 != 'pyd_module'):
__import__(import_1)
sys_modules_2 = sys.modules[import_1]
import_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'np', sys_modules_2.module_type_store, module_type_store)
else:
import numpy as np
import_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'np', numpy, module_type_store)
else:
# Assigning a type to the variable 'numpy' (line 3)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 3, 0), 'numpy', import_1)
remove_current_file_folder_from_path('C:/Users/redon/PycharmProjects/stypyV2/testing//test_programs/numpy/basic_numpy/functions/')
# Assigning a Call to a Name (line 5):
# Call to arange(...): (line 5)
# Processing the call arguments (line 5)
int_5 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 5, 14), 'int')
# Processing the call keyword arguments (line 5)
kwargs_6 = {}
# Getting the type of 'np' (line 5)
np_3 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 5, 4), 'np', False)
# Obtaining the member 'arange' of a type (line 5)
arange_4 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 5, 4), np_3, 'arange')
# Calling arange(args, kwargs) (line 5)
arange_call_result_7 = invoke(stypy.reporting.localization.Localization(__file__, 5, 4), arange_4, *[int_5], **kwargs_6)
# Assigning a type to the variable 'Z' (line 5)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 5, 0), 'Z', arange_call_result_7)
# Assigning a Call to a Name (line 6):
# Call to uniform(...): (line 6)
# Processing the call arguments (line 6)
int_11 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 6, 22), 'int')
int_12 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 6, 24), 'int')
# Processing the call keyword arguments (line 6)
kwargs_13 = {}
# Getting the type of 'np' (line 6)
np_8 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 6, 4), 'np', False)
# Obtaining the member 'random' of a type (line 6)
random_9 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 6, 4), np_8, 'random')
# Obtaining the member 'uniform' of a type (line 6)
uniform_10 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 6, 4), random_9, 'uniform')
# Calling uniform(args, kwargs) (line 6)
uniform_call_result_14 = invoke(stypy.reporting.localization.Localization(__file__, 6, 4), uniform_10, *[int_11, int_12], **kwargs_13)
# Assigning a type to the variable 'v' (line 6)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 6, 0), 'v', uniform_call_result_14)
# Assigning a Call to a Name (line 7):
# Call to argmin(...): (line 7)
# Processing the call keyword arguments (line 7)
kwargs_23 = {}
# Call to abs(...): (line 7)
# Processing the call arguments (line 7)
# Getting the type of 'Z' (line 7)
Z_17 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 7, 16), 'Z', False)
# Getting the type of 'v' (line 7)
v_18 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 7, 18), 'v', False)
# Applying the binary operator '-' (line 7)
result_sub_19 = python_operator(stypy.reporting.localization.Localization(__file__, 7, 16), '-', Z_17, v_18)
# Processing the call keyword arguments (line 7)
kwargs_20 = {}
# Getting the type of 'np' (line 7)
np_15 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 7, 9), 'np', False)
# Obtaining the member 'abs' of a type (line 7)
abs_16 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 7, 9), np_15, 'abs')
# Calling abs(args, kwargs) (line 7)
abs_call_result_21 = invoke(stypy.reporting.localization.Localization(__file__, 7, 9), abs_16, *[result_sub_19], **kwargs_20)
# Obtaining the member 'argmin' of a type (line 7)
argmin_22 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 7, 9), abs_call_result_21, 'argmin')
# Calling argmin(args, kwargs) (line 7)
argmin_call_result_24 = invoke(stypy.reporting.localization.Localization(__file__, 7, 9), argmin_22, *[], **kwargs_23)
# Assigning a type to the variable 'index' (line 7)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 7, 0), 'index', argmin_call_result_24)
# Assigning a Subscript to a Name (line 8):
# Obtaining the type of the subscript
# Getting the type of 'index' (line 8)
index_25 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 8, 6), 'index')
# Getting the type of 'Z' (line 8)
Z_26 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 8, 4), 'Z')
# Obtaining the member '__getitem__' of a type (line 8)
getitem___27 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 8, 4), Z_26, '__getitem__')
# Calling the subscript (__getitem__) to obtain the elements type (line 8)
subscript_call_result_28 = invoke(stypy.reporting.localization.Localization(__file__, 8, 4), getitem___27, index_25)
# Assigning a type to the variable 'e' (line 8)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 8, 0), 'e', subscript_call_result_28)
# ################# End of the type inference program ##################
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
|
[
"redondojose@uniovi.es"
] |
redondojose@uniovi.es
|
b81f349b219ecfb366970a6ddf21bfdcdcad34a5
|
71894f980d1209017837d7d02bc38ffb5dbcb22f
|
/audio/AlexaWithRaspioProHat/AlexaPi/main.py
|
9d5d327ba169eb237d036b14e4e26a54db885dad
|
[
"MIT"
] |
permissive
|
masomel/py-iot-apps
|
0f2418f8d9327a068e5db2cdaac487c321476f97
|
6c22ff2f574a37ba40a02625d6ed68d7bc7058a9
|
refs/heads/master
| 2021-03-22T04:47:59.930338
| 2019-05-16T06:48:32
| 2019-05-16T06:48:32
| 112,631,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,211
|
py
|
# msm: source - http://www.instructables.com/id/Build-an-Alexa-With-Raspio-Pro-Hat-and-Raspberry-P/?ALLSTEPS edits original
#! /usr/bin/env python
import os
import random
import time
import RPi.GPIO as GPIO
import alsaaudio
import wave
import random
from creds import *
import requests
import json
import re
from memcache import Client
#Settings
button = 18 #GPIO Pin with button connected
lights = [24, 25, 27] # GPIO Pins with LED's conneted
device = "plughw:1" # Name of your microphone/soundcard in arecord -L
#Setup
recorded = False
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
path = os.path.realpath(__file__).rstrip(os.path.basename(__file__))
def internet_on():
    """Return True if the Amazon auth endpoint is reachable, else False.

    Polled at startup until the network comes up.
    """
    print("Checking Internet Connection")
    try:
        # A bounded timeout keeps the startup poll from hanging forever on
        # a half-up link; any HTTP response at all proves connectivity.
        requests.get('https://api.amazon.com/auth/o2/token', timeout=10)
        print("Connection OK")
        return True
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made Ctrl-C impossible while
        # waiting for the network.
        print("Connection Failed")
        return False
def gettoken():
    """Return a cached or freshly refreshed AVS access token.

    Checks memcached first; on a miss, exchanges the refresh token (from
    creds) for a new access token and caches it.  Returns False when no
    refresh token is configured.
    """
    token = mc.get("access_token")
    refresh = refresh_token
    if token:
        return token
    elif refresh:
        payload = {"client_id" : Client_ID, "client_secret" : Client_Secret, "refresh_token" : refresh, "grant_type" : "refresh_token", }
        url = "https://api.amazon.com/auth/o2/token"
        r = requests.post(url, data = payload)
        resp = json.loads(r.text)
        # Cached for 3570 s -- presumably just under the token's one-hour
        # lifetime so a cached token never expires mid-request; confirm.
        mc.set("access_token", resp['access_token'], 3570)
        return resp['access_token']
    else:
        return False
def alexa():
    """Send recording.wav to the AVS speechrecognizer endpoint and play the reply.

    GPIO pins 24/25/27 are toggled to show request progress; pin 25 flashes
    three times on a non-200 response.
    """
    GPIO.output(27, GPIO.LOW) #blue light out
    GPIO.output(24, GPIO.HIGH)
    url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
    headers = {'Authorization' : 'Bearer %s' % gettoken()}
    # AVS v1 request metadata: no active playback, close-talk profile,
    # raw 16 kHz mono PCM audio.
    d = {
        "messageHeader": {
            "deviceContext": [
                {
                    "name": "playbackState",
                    "namespace": "AudioPlayer",
                    "payload": {
                        "streamId": "",
                        "offsetInMilliseconds": "0",
                        "playerActivity": "IDLE"
                    }
                }
            ]
        },
        "messageBody": {
            "profile": "alexa-close-talk",
            "locale": "en-us",
            "format": "audio/L16; rate=16000; channels=1"
        }
    }
    # Multipart upload: one JSON metadata part plus the raw audio part.
    with open(path+'recording.wav') as inf:
        files = [
            ('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),
            ('file', ('audio', inf, 'audio/L16; rate=16000; channels=1'))
        ]
        r = requests.post(url, headers=headers, files=files)
    if r.status_code == 200:
        # Extract the MIME boundary from the Content-Type header, then take
        # the body of any sufficiently large part (>= 1024 bytes) as the MP3.
        for v in r.headers['content-type'].split(";"):
            if re.match('.*boundary.*', v):
                boundary = v.split("=")[1]
        data = r.content.split(boundary)
        for d in data:
            if (len(d) >= 1024):
                audio = d.split('\r\n\r\n')[1].rstrip('--')
        with open(path+"response.mp3", 'wb') as f:
            f.write(audio)
        GPIO.output(25, GPIO.LOW)
        # 1sec.mp3 is a lead-in clip played before the actual response.
        os.system('mpg123 -q {}1sec.mp3 {}response.mp3'.format(path, path))
        GPIO.output(24, GPIO.LOW)
    else:
        # Failure: flash pin 25 three times.
        GPIO.output(lights, GPIO.LOW)
        for x in range(0, 3):
            time.sleep(.2)
            GPIO.output(25, GPIO.HIGH)
            time.sleep(.2)
            GPIO.output(lights, GPIO.LOW)
    GPIO.output(27, GPIO.HIGH) #blue light on
def start():
    """Main button loop: record while the button is held, then send to AVS.

    The button input is pulled up, so val == 0 means pressed.  On press we
    open the ALSA capture device and accumulate raw PCM; while held we keep
    reading; on release we dump the audio to recording.wav and call alexa().
    """
    # BUGFIX: `recorded`, `inp` and `audio` were read before assignment.
    # Assigning `recorded` below makes it function-local, so the
    # module-level `recorded = False` was never used here and the first
    # button-release transition raised UnboundLocalError.  Initialise all
    # loop state locally instead.
    recorded = False
    inp = None
    audio = ""
    GPIO.output(27, GPIO.HIGH) #blue light
    last = GPIO.input(button)
    while True:
        val = GPIO.input(button)
        if val != last:
            GPIO.output(27, GPIO.LOW) #blue light out
            last = val
            if val == 1 and recorded == True:
                # Button released after a recording: flush to disk and send.
                rf = open(path+'recording.wav', 'w')
                rf.write(audio)
                rf.close()
                inp = None
                alexa()
            elif val == 0:
                # Button pressed: open the capture device and start recording.
                GPIO.output(25, GPIO.HIGH)
                inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, device)
                inp.setchannels(1)
                inp.setrate(16000)
                inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
                inp.setperiodsize(500)
                audio = ""
                l, data = inp.read()
                if l:
                    audio += data
                recorded = True
        elif val == 0:
            # Button still held: keep appending captured audio.
            l, data = inp.read()
            if l:
                audio += data
if __name__ == "__main__":
try:
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(lights, GPIO.OUT)
GPIO.output(lights, GPIO.LOW)
while internet_on() == False:
print(".")
token = gettoken()
os.system('mpg123 -q {}1sec.mp3 {}hello.mp3'.format(path, path))
for x in range(0, 3):
time.sleep(.1)
GPIO.output(24, GPIO.HIGH)
time.sleep(.1)
GPIO.output(24, GPIO.LOW)
start()
except KeyboardInterrupt:
GPIO.cleanup()
print(" clean program exit.")
pass
|
[
"msmelara@gmail.com"
] |
msmelara@gmail.com
|
ca4a09119aeb8e0bf90846f2387285fcd2f58815
|
008ea0c503829f33840495373ad3d60794575af3
|
/source/sublime/oop/o12.py
|
95dae399b2627636d23c46aad39907c93034e366
|
[] |
no_license
|
JyHu/PYStudy
|
6515bea47ca6f80e336f3b6a7a14b1159fde872f
|
ec0855c414237bdd7d0cb28f79a81c02ccd52d45
|
refs/heads/master
| 2016-08-12T19:44:06.723361
| 2016-04-11T10:38:59
| 2016-04-11T10:38:59
| 45,384,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,193
|
py
|
#
# coding:utf-8
#
'''
除了使用 type() 动态创建类以外,要控制类的创建行为,还可以使用 metaclass
'''
__author__ = 'JyHu'
class ListMetaclass(type):
    """Metaclass that injects an ``add`` method into every class it creates.

    ``add(value)`` delegates to ``append``, so classes built with this
    metaclass are expected to derive from ``list``.
    """

    def __new__(cls, name, bases, attrs):
        def add(self, value):
            # Same as the original lambda: delegate to list.append.
            self.append(value)

        attrs['add'] = add
        return super().__new__(cls, name, bases, attrs)
class MyList(list, metaclass = ListMetaclass):
    # The metaclass injects add(); no body of its own is needed.
    pass
# Demo: add() was supplied by ListMetaclass, so this prints [1].
L = MyList()
L.add(1)
print(L)
class Field(object):
    """Metadata for one ORM column: attribute name plus SQL column type.

    Subclasses fix the column type (varchar, bigint, ...).
    """

    def __init__(self, name, column_type):
        self.name = name
        self.column_type = column_type

    def __str__(self):
        # e.g. <StringField:username> -- shows the concrete subclass name.
        return '<{0}:{1}>'.format(self.__class__.__name__, self.name)
class StringField(Field):
    """Field stored as a varchar(100) column."""

    def __init__(self, name):
        Field.__init__(self, name, 'varchar(100)')
class IntegerField(Field):
    """Field stored as a bigint column."""

    def __init__(self, name):
        Field.__init__(self, name, 'bigint')
class ModelMetaclass(type):
    """Collects Field-valued attributes of a model class into __mappings__.

    The base class named 'Model' is created untouched.  For every other
    class, each Field attribute is removed from the class body, recorded
    in __mappings__, and __table__ is set to the class name.
    """

    def __new__(cls, name, bases, attrs):
        # The abstract base itself gets no mapping treatment.
        if name == 'Model':
            return type.__new__(cls, name, bases, attrs)
        print('Found model: %s' % name)
        mappings = {}
        for key, value in attrs.items():
            if not isinstance(value, Field):
                continue
            print('Found mapping : %s ==> %s' % (key, value))
            mappings[key] = value
        # Strip the Field descriptors so instance attributes take over.
        for key in mappings:
            attrs.pop(key)
        attrs['__mappings__'] = mappings
        attrs['__table__'] = name
        return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass = ModelMetaclass):
    """Dict-backed ORM base: attribute access maps onto dict keys.

    Subclasses declare Field attributes; ModelMetaclass collects them into
    __mappings__ / __table__, which save() uses to build an INSERT.
    """

    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        # Only invoked for missing attributes -- fall through to the dict.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Model' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        self[key] = value

    def save(self):
        """Print the parameterised INSERT statement for this instance."""
        mapping = self.__mappings__
        fields = [field.name for field in mapping.values()]
        params = ['?'] * len(mapping)
        args = [getattr(self, attr, None) for attr in mapping]
        sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))
        print('SQL: %s' % sql)
        print('ARGS: %s' % str(args))
class User(Model):
    # Each Field below is harvested by ModelMetaclass into __mappings__;
    # the SQL column name is the string passed to the Field constructor.
    id = IntegerField('id')
    name = StringField('username')
    email = StringField('email')
    password = StringField('password')
# Demo: constructing a User triggers the metaclass mapping prints,
# and save() prints the generated INSERT statement and argument list.
u = User(id = 12345, name = 'Michael', email = 'test@orm.org', password = 'my-pwd')
u.save()
|
[
"auu.aug@gmail.com"
] |
auu.aug@gmail.com
|
3807d388af745242e706f2bb498ca4887e7d8ad5
|
ecd4b06d5d5368b71fd72a1c2191510a03b728fd
|
/6 - introduction to databases in python/count of Records by State.py
|
1a718a86aaba2d5c28da2d05dd2855263e57b0c8
|
[
"MIT"
] |
permissive
|
Baidaly/datacamp-samples
|
86055db5e326b59bfdce732729c80d76bf44629e
|
37b4f78a967a429e0abca4a568da0eb9d58e4dff
|
refs/heads/master
| 2022-07-27T01:18:00.700386
| 2022-07-18T19:27:23
| 2022-07-18T19:27:23
| 123,827,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
'''
Often, we want to get a count for each record with a particular value in another column. The .group_by() method helps answer this type of query. You can pass a column to the .group_by() method and use in an aggregate function like sum() or count(). Much like the .order_by() method, .group_by() can take multiple columns as arguments.
'''
# Import func
from sqlalchemy import func
# Build a query to select the state and count of ages by state: stmt
stmt = select([census.columns.state, func.count(census.columns.age)])
# Group stmt by state
stmt = stmt.group_by(census.columns.state)
# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
# Print the keys/column names of the results returned
print(results[0].keys())
|
[
"daulet.urazalinov@uptake.com"
] |
daulet.urazalinov@uptake.com
|
71a1987f65749e123abe8d4ab519826b34bf172a
|
bec8f235b1392542560166dd02c2f0d88c949a24
|
/examples/twisted/wamp1/rpc/simple/example2/server.py
|
2e21dcdeffdb7ac18f40f3c1c3790e7731539144
|
[
"Apache-2.0"
] |
permissive
|
gourneau/AutobahnPython
|
f740f69b9ecbc305a97a5412ba3bb136a4bdec69
|
5193e799179c2bfc3b3f8dda86ccba69646c7ee3
|
refs/heads/master
| 2021-01-15T22:02:32.459491
| 2014-07-02T13:34:57
| 2014-07-02T13:34:57
| 21,437,288
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,564
|
py
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, math
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import listenWS
from autobahn.wamp1.protocol import exportRpc, \
WampServerFactory, \
WampServerProtocol
class Calc:
"""
A simple calc service we will export for Remote Procedure Calls (RPC).
All you need to do is use the @exportRpc decorator on methods
you want to provide for RPC and register a class instance in the
server factory (see below).
The method will be exported under the Python method name, or
under the (optional) name you can provide as an argument to the
decorator (see asyncSum()).
"""
@exportRpc
def add(self, x, y):
return x + y
@exportRpc
def sub(self, x, y):
return x - y
@exportRpc
def square(self, x):
MAX = 1000
if x > MAX:
## raise a custom exception
raise Exception("http://example.com/error#number_too_big",
"%d too big for me, max is %d" % (x, MAX),
MAX)
return x * x
@exportRpc
def sum(self, list):
return reduce(lambda x, y: x + y, list)
@exportRpc
def pickySum(self, list):
errs = []
for i in list:
if i % 3 == 0:
errs.append(i)
if len(errs) > 0:
raise Exception("http://example.com/error#invalid_numbers",
"one or more numbers are multiples of 3",
errs)
return reduce(lambda x, y: x + y, list)
@exportRpc
def sqrt(self, x):
return math.sqrt(x)
@exportRpc("asum")
def asyncSum(self, list):
## Simulate a slow function.
d = defer.Deferred()
reactor.callLater(3, d.callback, self.sum(list))
return d
class SimpleServerProtocol(WampServerProtocol):
"""
Demonstrates creating a simple server with Autobahn WebSockets that
responds to RPC calls.
"""
def onSessionOpen(self):
# when connection is established, we create our
# service instances ...
self.calc = Calc()
# .. and register them for RPC. that's it.
self.registerForRpc(self.calc, "http://example.com/simple/calc#")
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WampServerFactory("ws://localhost:9000", debugWamp = debug)
factory.protocol = SimpleServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
|
[
"tobias.oberstein@tavendo.de"
] |
tobias.oberstein@tavendo.de
|
62aa187e75d0d640cc2d69a224a102a0cafca5fc
|
0179a8c11f51d89cc962c7d9249203ff0e67e405
|
/shell/shell_contract.py
|
1238844b436bb74f76efe8dda6e11fca6a4f0c77
|
[
"MIT"
] |
permissive
|
paulo-romano/orcamentos
|
7033637065c39c457a59b53eab215234f7d5b85a
|
dc87fd2736e9f8262ed775bf9160d1e21eb1684a
|
refs/heads/master
| 2021-01-15T22:14:41.595934
| 2016-02-16T23:02:59
| 2016-02-16T23:02:59
| 51,551,215
| 1
| 0
| null | 2016-02-11T22:16:43
| 2016-02-11T22:16:43
| null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from random import choice
from django.db import IntegrityError
from orcamentos.core.models import Contract, Proposal, Customer
# Seed script: create a Contract (randomly canceled or not) for every
# Proposal whose status is 'a' -- presumably "approved"; confirm against
# the Proposal.status choices.
REPEAT = Proposal.objects.filter(status='a')
for i in REPEAT:
    proposal = Proposal.objects.get(pk=i.pk)
    contractor = Customer.objects.get(pk=proposal.work.customer.pk)
    try:
        Contract.objects.create(
            proposal=proposal,
            contractor=contractor,
            is_canceled=choice((True, False)))
    except IntegrityError:
        # A contract already exists for this proposal; skip it.
        print('Registro existente.')
|
[
"rg3915@yahoo.com.br"
] |
rg3915@yahoo.com.br
|
5739e561cd3360ae20088b5f64ed45c14b854723
|
cfb44550355ea3c36e610d3f1eb75d8dcbdc8ebe
|
/strawberry/setup.py
|
62732a79141f77a94d9dc5da70b6e018a0858ff8
|
[
"Apache-2.0"
] |
permissive
|
KiritoDv/blueberry
|
bd0dc38bfe16622693efd8ff4a31368b4dbeb4ac
|
a47feeb3e944d44b9f2af4661f6c409f51fbabd6
|
refs/heads/master
| 2023-01-18T19:06:00.074071
| 2020-11-24T23:04:09
| 2020-11-24T23:04:09
| 314,765,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from distutils.core import setup, Extension
# C extension built from straw.c; importable as the module `strawberry`.
module1 = Extension('strawberry', sources = ['straw.c'])
# NOTE(review): distutils was removed in Python 3.12 -- consider setuptools.
setup (name = 'strawberry',
        version = '1.0',
        description = 'Strawberry miniaudio wrapper',
        ext_modules = [module1])
|
[
"alex@pop-os.localdomain"
] |
alex@pop-os.localdomain
|
cf8df78c19fed7972b683782a743137388fcee12
|
6b518cf14ea3f59fd59136dbd2a7ac70234bb96e
|
/pspipe.py
|
4523f7e32db887641957d2c80753873e9e831bcc
|
[] |
no_license
|
simula67/advanced-python-course-material
|
8064a1adddff45b0980d4bd1948fdeb2f88aec89
|
98870da337cbc001bcf4215ce44f82f0430fd3ce
|
refs/heads/master
| 2016-09-06T12:29:37.397321
| 2015-06-29T05:10:19
| 2015-06-29T05:10:19
| 38,228,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
__author__ = 'antonjoj'
import subprocess
# Windows demo of piping one process into another, equivalent to
# `type datafiles\passwd | find "root"` (type/find are cmd.exe builtins).
cat = subprocess.Popen('type datafiles\\passwd', shell=True, stdout=subprocess.PIPE)
find = subprocess.Popen('find \"root\"', stdout=subprocess.PIPE, shell=True, stdin=cat.stdout)
# communicate() returns a (stdout, stderr) pair; print the non-empty ones.
# NOTE(review): `print line` is Python 2 syntax -- this will not run on Python 3.
for line in find.communicate():
    if line:
        print line
|
[
"simula67@gmail.com"
] |
simula67@gmail.com
|
9cf98b7b4745bf18117c0e68108e370d4226cd25
|
24e21c68bc2c4f1c3f58b96ae13512968a919024
|
/memoryAndMulti/threadDemo.py
|
557b1eb374bb59d12ee08ff31de2c68f27abdcf2
|
[] |
no_license
|
maketubu7/spiderDemo
|
0308e88815c2035fa33acd1c4ca85329d2435034
|
9c5e78fdafba37a08e51c2e988c54957feed5b0f
|
refs/heads/master
| 2021-02-09T18:43:32.493539
| 2020-11-11T09:13:21
| 2020-11-11T09:13:21
| 244,314,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/2/27 0:10
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : threadDemo.py
# @Software: PyCharm
from threading import Thread
import threading,time
from typing import Optional
def loop():
    """Print the current thread's name, then the integers 0 through 4."""
    print(threading.current_thread().name)
    for count in range(5):
        print(count)
def use_thread():
    """Run loop() on a worker thread named 'loop_thread' and wait for it."""
    print(threading.current_thread().name)
    worker = Thread(target=loop, name='loop_thread')
    worker.start()  # launch the worker
    worker.join()   # block until it finishes
class my_thread(Thread):
    """Thread that prints its counter and thread name once per second, five times."""

    def __init__(self):
        super(my_thread, self).__init__()
        # Loop counter; run() advances it from 0 up to 5.
        self.n = 0

    def run(self):
        while self.n < 5:
            print(self.n)
            print(threading.current_thread().name)
            time.sleep(1)
            self.n += 1
if __name__ == "__main__":
# use_thread()
t = my_thread()
t.start()
t.join()
|
[
"601176930@qq.com"
] |
601176930@qq.com
|
fda1f90a4be88c7944f2879764d5c153faed9cb0
|
c57439f0c98af370ace65f9d55ef5a457bedc531
|
/ydk/models/ipv6/Cisco_IOS_XR_ipv6_ma_subscriber_cfg.py
|
a66d84f0a2924a7e9df63458243f00228eb1dd1d
|
[
"Apache-2.0"
] |
permissive
|
myahmao/ydk-py
|
c932fbd8245e554227cce0fd723d9a22887b0c40
|
2f367d93f2088d4abdc2f2bb10ca4864952b458a
|
refs/heads/master
| 2021-01-14T11:32:29.064494
| 2016-03-15T22:44:05
| 2016-03-15T22:44:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
""" Cisco_IOS_XR_ipv6_ma_subscriber_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv6\-ma\-subscriber package configuration.
This YANG module augments the
Cisco\-IOS\-XR\-subscriber\-infra\-tmplmgr\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYDataValidationError
class Ipv6ReachableVia_Enum(Enum):
"""
Ipv6ReachableVia_Enum
Ipv6 reachable via
"""
"""
Source is reachable via interface on which
packet was received
"""
RECEIVED = 1
@staticmethod
def _meta_info():
from ydk.models.ipv6._meta import _Cisco_IOS_XR_ipv6_ma_subscriber_cfg as meta
return meta._meta_table['Ipv6ReachableVia_Enum']
|
[
"manradha@cisco.com"
] |
manradha@cisco.com
|
a55f91c3b4e428b323ddb4834febff18bff53cb7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02818/s319321320.py
|
ec787c89f517dd3576a0c30e3d24e3bf48cf1b60
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# ABC149
# B Greesy Takahashi
# takはA枚、aokiはB枚、TAKはK回
a, b, k = map(int, input().split())
if k > a:
if k - a > b:
print(0,0)
else:
print(0,b - (k - a))
else:
print(a-k,b)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4312c5132af6818ca35ed0f704d81bfac2ddb825
|
5963c12367490ffc01c9905c028d1d5480078dec
|
/tests/components/wallbox/test_config_flow.py
|
6b5a05a3486830b64b8d0d53f7b409dfb288bb79
|
[
"Apache-2.0"
] |
permissive
|
BenWoodford/home-assistant
|
eb03f73165d11935e8d6a9756272014267d7d66a
|
2fee32fce03bc49e86cf2e7b741a15621a97cce5
|
refs/heads/dev
| 2023-03-05T06:13:30.354545
| 2021-07-18T09:51:53
| 2021-07-18T09:51:53
| 117,122,037
| 11
| 6
|
Apache-2.0
| 2023-02-22T06:16:51
| 2018-01-11T16:10:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,028
|
py
|
"""Test the Wallbox config flow."""
import json
from unittest.mock import patch
import requests_mock
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.wallbox import InvalidAuth, config_flow
from homeassistant.components.wallbox.const import DOMAIN
from homeassistant.core import HomeAssistant
# Canned charger-status payload, decoded to a dict for comparisons.
# NOTE(review): not referenced by any test visible in this file -- possibly
# unused; confirm before removing.
test_response = json.loads(
    '{"charging_power": 0,"max_available_power": 25,"charging_speed": 0,"added_range": 372,"added_energy": 44.697}'
)
async def test_show_set_form(hass: HomeAssistant) -> None:
    """Test that the setup form is served.

    Drives the user step directly with no input and checks that a form for
    the "user" step comes back.
    """
    flow = config_flow.ConfigFlow()
    flow.hass = hass
    result = await flow.async_step_user(user_input=None)

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth.

    Patches the hub's authenticate call to raise InvalidAuth and checks that
    the form is redisplayed with an "invalid_auth" base error.
    """
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    with patch(
        "homeassistant.components.wallbox.config_flow.WallboxHub.async_authenticate",
        side_effect=InvalidAuth,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_authenticate(hass):
    """Test we handle an authentication failure (HTTP 403) from the API.

    Both mocked endpoints reject with 403; the flow should redisplay the
    form with an "invalid_auth" base error.
    """
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    with requests_mock.Mocker() as mock_request:
        mock_request.get(
            "https://api.wall-box.com/auth/token/user",
            text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
            status_code=403,
        )
        mock_request.get(
            "https://api.wall-box.com/chargers/status/12345",
            text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
            status_code=403,
        )
        # Fix: the original issued this identical request twice back-to-back;
        # the duplicate was redundant (same flow step, same mocked responses,
        # same asserted result), so a single attempt is kept.
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
    """Test we handle a connection error.

    Authentication succeeds (200) but the charger-status request returns
    404, so the form should come back with a "cannot_connect" base error.
    """
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    with requests_mock.Mocker() as mock_request:
        mock_request.get(
            "https://api.wall-box.com/auth/token/user",
            text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
            status_code=200,
        )
        mock_request.get(
            "https://api.wall-box.com/chargers/status/12345",
            text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
            status_code=404,
        )
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_validate_input(hass):
    """Test the successful path: auth and status both return 200.

    The flow result should be titled "Wallbox Portal" and carry the
    submitted station id in its data.
    """
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    with requests_mock.Mocker() as mock_request:
        mock_request.get(
            "https://api.wall-box.com/auth/token/user",
            text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
            status_code=200,
        )
        mock_request.get(
            "https://api.wall-box.com/chargers/status/12345",
            text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
            status_code=200,
        )
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )

    assert result2["title"] == "Wallbox Portal"
    assert result2["data"]["station"] == "12345"
|
[
"noreply@github.com"
] |
BenWoodford.noreply@github.com
|
b7b8ce02d0aba506b2683b3c8862f61ba4fd4293
|
9095c1a0da8c6ffe914ee6dd9c4708062fd95c9a
|
/vtpl_api/models/source_type.py
|
99b3143d277011d407f04a5955fab602b32550ca
|
[
"MIT"
] |
permissive
|
vtpl1/vtpl_api_py
|
2e5338bd08677f12fc7304fb6ac7a32f32af1c93
|
d289c92254deb040de925205c583de69802a1c6b
|
refs/heads/master
| 2020-09-10T23:34:21.828350
| 2019-11-15T07:26:53
| 2019-11-15T07:26:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,504
|
py
|
# coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class SourceType(object):
    """Enumeration-style OpenAPI model for media source types.

    NOTE: this class mirrors code produced by openapi-generator
    (https://openapi-generator.tech); keep the spec as the source of truth.
    """

    # Allowed enum values.
    NONE = "none"
    RTSP = "rtsp"
    HTTP = "http"
    FILE = "file"
    FTP = "ftp"
    VMS = "vms"
    MQTT = "mqtt"
    AMQP = "amqp"
    S3 = "S3"
    VS3 = "VS3"
    BASEURL = "BaseUrl"
    RELATIVEURL = "RelativeUrl"

    # openapi_types maps attribute name -> attribute type; attribute_map maps
    # attribute name -> JSON key.  Both are empty for a bare enum model.
    openapi_types = {}

    attribute_map = {}

    def __init__(self):  # noqa: E501
        """SourceType - a model defined in OpenAPI"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, SourceType):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
|
[
"monotosh.das@videonetics.com"
] |
monotosh.das@videonetics.com
|
29cd5aa3c4e1875cf4d2d691c2218d861a2d333c
|
7e4460c85790fae2d470182732289bcd1b8777b2
|
/Process/process_meshes.py
|
1ea42ad249869c9afd8713ee9ab0cb63fbd9752a
|
[] |
no_license
|
khamukkamu/swconquest-msys
|
5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e
|
71337a4ae9c507b9440e84cf49d31fc67a781978
|
refs/heads/master
| 2021-04-29T19:00:10.389224
| 2019-05-01T15:11:11
| 2019-05-01T15:11:11
| 121,704,753
| 1
| 1
| null | 2018-02-16T01:40:58
| 2018-02-16T01:40:58
| null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import string
from header_common import *
from module_info import *
from module_meshes import *
from process_common import *
from process__swyhelper import *
def save_meshes():
  # Write every mesh record to <export_dir>/meshes.txt in the engine's
  # plain-text format: a count line, then one "mesh_<name> ..." line per mesh.
  ofile = open(export_dir + "meshes.txt","w")
  ofile.write("%d\n"%len(meshes))
  for i_mesh in xrange(len(meshes)):
    mesh = meshes[i_mesh]
    # Fields: name, flags, resource name (spaces escaped), then 9 numeric
    # components -- presumably translation/rotation/scale; confirm against
    # the module_meshes tuple layout.
    ofile.write("mesh_%s %d %s %s %s %s %s %s %s %s %s %s\n"%(mesh[0],mesh[1],replace_spaces(mesh[2]),swytrailzro(mesh[3]),swytrailzro(mesh[4]),swytrailzro(mesh[5]),swytrailzro(mesh[6]),swytrailzro(mesh[7]),swytrailzro(mesh[8]),swytrailzro(mesh[9]),swytrailzro(mesh[10]),swytrailzro(mesh[11])))
  ofile.close()
def save_python_header():
  # Regenerate the mesh ID constants module (mesh_<name> = index) used by the
  # other module_system scripts; the wb/mb switch selects which variant file
  # to write.
  if (wb_compile_switch):
    ofile = open("./IDs/ID_meshes_wb.py","w")
  else:
    ofile = open("./IDs/ID_meshes_mb.py","w")
  for i_mesh in xrange(len(meshes)):
    ofile.write("mesh_%s = %d\n"%(meshes[i_mesh][0],i_mesh))
  ofile.write("\n\n")
  ofile.close()
# Script entry point: regenerate the ID header first, then export the data.
print "Exporting meshes..."
save_python_header()
save_meshes()
|
[
"swyterzone@gmail.com"
] |
swyterzone@gmail.com
|
88f88a537c87284e71ef254d24a05d22fc3a9233
|
6a928130337dafece1a6158badd00d1d46571003
|
/reportForm/wsgi.py
|
28a489cea41932132be6da890e260ca78c6ee72b
|
[] |
no_license
|
Yanl05/reportForm
|
bb5a36cff3fac3aca76b5bc50c92fe54282250a8
|
45a915b29102c1f49035df93217782ea563cdb9f
|
refs/heads/master
| 2023-04-18T00:40:19.355040
| 2021-04-29T14:37:59
| 2021-04-29T14:37:59
| 362,485,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for the reportForm project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportForm.settings')

application = get_wsgi_application()
|
[
"756593069@qq.com"
] |
756593069@qq.com
|
6a7bd840b05232033b4479a414b2dba8cac470bb
|
d2fae2d0ff36fde8d8402bdac1de5b6760f050b7
|
/app/tests/Test_passwordchecker.py
|
031f23e09f40532aa833df7d554126e8cd5b2beb
|
[] |
no_license
|
DennisMufasa/mongodb-flask_app
|
8701d817d757a5144b9a98ba4293a948c537b6c5
|
53c3447850d16d630428a020fe28949ff84c4a03
|
refs/heads/master
| 2022-12-09T11:31:59.085865
| 2020-08-31T02:32:57
| 2020-08-31T02:32:57
| 260,714,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# third-party import
import unittest
# local import
from ..api.v1.models.utils import password_checker
class Test_Password_checker(unittest.TestCase):
    """Unit tests for the password_checker length-validation helper."""

    def test_password_len(self):
        # A 3-char input should be rejected as too short, and the long input
        # as too long; the helper returns a descriptive message either way.
        password_check1 = password_checker('boo')
        password_check2 = password_checker('lysergicaciddyethylammide')
        self.assertEqual(password_check1, 'password too short')
        self.assertEqual(password_check2, 'password too long')
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
[
"denny.muasa@gmail.com"
] |
denny.muasa@gmail.com
|
faa2e47e01b26f98eb24501a23c59d2dd2f3081a
|
70bc77336e4544031ad7d7d29a2e964ef2626076
|
/base/models.py
|
bf4ba34fec4fc78262b81397124b4041d26e64fd
|
[] |
no_license
|
DronMDF/vanadis
|
9af7a8c9281bf0eb17df593f5c9fc9345e474612
|
de692207bbd127c5a9952e3144653492a0ba969f
|
refs/heads/master
| 2020-04-17T08:11:18.411429
| 2016-12-21T20:50:05
| 2016-12-21T20:50:05
| 66,539,179
| 1
| 0
| null | 2016-12-21T20:50:06
| 2016-08-25T08:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 654
|
py
|
from django.db import models
class Project(models.Model):
    """A tracked code project; Object and Issue rows reference it."""

    # Indexed for fast lookup by name.
    name = models.CharField(max_length=100, db_index=True)
    # Optional URL of the project's source repository.
    repo_url = models.CharField(max_length=256, null=True)
class Object(models.Model):
    """An analyzed object belonging to a project.

    NOTE(review): the class name shadows the ``object`` builtin's spelling;
    kept as-is since renaming would change the schema.
    """

    project = models.ForeignKey(Project, on_delete=models.CASCADE, db_index=True)
    # External object identifier -- presumably a content hash or upstream id;
    # confirm against the code that populates it.
    oid = models.BigIntegerField(db_index=True)
    # Denormalized count of issues recorded against this object.
    issues_count = models.IntegerField()
class Issue(models.Model):
    """A single diagnostic found in an object, pinned to a line/position."""

    project = models.ForeignKey(Project, on_delete=models.CASCADE, db_index=True)
    object = models.ForeignKey(Object, on_delete=models.CASCADE, db_index=True)
    # Location of the issue within the object.
    line = models.IntegerField()
    position = models.IntegerField()
    # Human-readable diagnostic message.
    text = models.CharField(max_length=256)
|
[
"dron.valyaev@gmail.com"
] |
dron.valyaev@gmail.com
|
8b09a98c3ac1acf69e5c84f6bbeeb54671c20bc6
|
11ce41733d6f31153fe14f800c9dd0be18615862
|
/news/admin.py
|
50285420a545e93e7a3d322e73e11bb5a4d627f4
|
[
"MIT"
] |
permissive
|
techacademypython/django_image_crop_views
|
6ff6731944f5d09721452a71b0745089d1b035ef
|
2f9c51ae80705dc23607e157baa4f5767957a2f1
|
refs/heads/master
| 2023-05-05T13:12:23.642970
| 2019-09-03T16:38:24
| 2019-09-03T16:38:24
| 206,105,932
| 0
| 0
|
MIT
| 2022-11-22T04:13:41
| 2019-09-03T15:07:05
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
from django.contrib import admin
from image_cropping import ImageCroppingMixin
# Register your models here.
from news.models import NewsModel
class NewsModelAdmin(ImageCroppingMixin, admin.ModelAdmin):
    """Admin for NewsModel with interactive image-cropping support."""

    # preview_count is maintained by the application, not by editors.
    readonly_fields = ["preview_count"]
    fields = [
        "image", "name", "text", "cropping", "preview_count"
    ]


admin.site.register(NewsModel, NewsModelAdmin)
|
[
"munisisazade@gmail.com"
] |
munisisazade@gmail.com
|
67dccdaf388e326388afec57b7acdf38c78908a9
|
eba0e40667d6082b5eeefdbaf2862e3f02fd774c
|
/mr_utils/sim/ssfp/quantitative_field_mapping.py
|
44a85af73a56bb265904c32bd1da3b6aaf216bbc
|
[] |
no_license
|
zongjg/mr_utils
|
a0ec98ed2d03a6d52d81be8ef108993f92baeee1
|
08cb43dcf53fd6fddd3304e3514a608842310a34
|
refs/heads/master
| 2022-01-04T16:25:41.065177
| 2019-05-11T20:20:22
| 2019-05-11T20:20:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,355
|
py
|
'''Quantitative field mapping for bSSFP.
Collect quantitative MR maps (T1, T2, flip angle), then, assuming that these
won't change during the duration of the scan, we can use these to take a single
bSSFP scan each time point and solve for the off-resonance. Thus we get a
field map at time point.
'''
import numpy as np
from mr_utils.utils import find_nearest
from mr_utils.sim.ssfp import ssfp
# from mr_utils import view
def get_df_responses(T1, T2, PD, TR, alpha, phase_cyc, dfs):
    '''Simulate the bSSFP response at every off-resonance value in dfs.

    Parameters
    ==========
    T1 : float
        scalar T1 longitudinal recovery value in seconds.
    T2 : float
        scalar T2 transverse decay value in seconds.
    PD : float
        scalar proton density value scaled the same as acquisiton.
    TR : float
        Repetition time in seconds.
    alpha : float
        Flip angle in radians.
    phase_cyc : float
        RF phase cycling in radians.
    dfs : float
        Off-resonance values to simulate over.

    Returns
    =======
    resp : array_like
        Simulated Mxy, indexed the same way as dfs.
    '''
    # Broadcast the scalar tissue parameters to one entry per off-resonance
    # value so a single vectorized ssfp() call covers the whole spectrum.
    T1_map = np.full(dfs.shape, T1, dtype=float)
    T2_map = np.full(dfs.shape, T2, dtype=float)
    PD_map = np.full(dfs.shape, PD, dtype=float)
    return ssfp(T1_map, T2_map, TR, alpha, dfs, phase_cyc=phase_cyc, M0=PD_map)
def quantitative_fm_scalar(Mxy, dfs, T1, T2, PD, TR, alpha, phase_cyc):
    '''Find the off-resonance whose simulated signal best matches Mxy.

    Parameters
    ==========
    Mxy : float
        Complex transverse signal we measure.
    dfs : array_like
        Off-resonance values to simulate over.
    T1 : float
        scalar T1 longitudinal recovery value in seconds.
    T2 : float
        scalar T2 transverse decay value in seconds.
    PD : float
        scalar proton density value scaled the same as acquisiton.
    TR : float
        Repetition time in seconds.
    alpha : float
        Flip angle in radians.
    phase_cyc : float
        RF phase cycling in radians.

    Returns
    =======
    float
        The entry of dfs whose simulated response is nearest to Mxy.
    '''
    # Simulate the full spectrum, then pick the closest simulated response.
    spectrum = get_df_responses(T1, T2, PD, TR, alpha, phase_cyc, dfs)
    nearest, _ = find_nearest(spectrum, Mxy)
    return dfs[nearest]
def quantitative_fm(Mxys, dfs, T1s, T2s, PDs, TR, alpha, phase_cyc, mask=None):
    '''Find field map given quantitative maps.

    Parameters
    ==========
    Mxys : array_like
        Complex transverse signal we measure.
    dfs : array_like
        Off-resonance values to simulate over.
    T1s : array_like
        scalar T1 longitudinal recovery value in seconds.
    T2s : array_like
        scalar T2 transverse decay value in seconds.
    PDs : array_like
        scalar proton density value scaled the same as acquisiton.
    TR : float
        Repetition time in seconds.
    alpha : float
        Flip angle in radians.
    phase_cyc : float
        RF phase cycling in radians.
    mask : array_like
        Boolean mask to tell which pixels we should compute df for.

    Returns
    =======
    fm : array_like
        Field map, reshaped to the shape of T1s.
    '''
    # Cache one simulated spectrum per unique (PD, T1, T2) combination so
    # repeated tissue values do not trigger repeated simulations.
    response_cache = {}
    out_shape = np.asarray(T1s).shape

    if mask is None:
        mask = np.ones(Mxys.shape)

    Mxys = np.asarray(Mxys).flatten()
    T1s = np.asarray(T1s).flatten()
    T2s = np.asarray(T2s).flatten()
    PDs = np.asarray(PDs).flatten()
    mask = np.asarray(mask).flatten()

    fm = np.zeros(Mxys.size)
    for px in range(Mxys.size):
        if not mask[px]:
            # Masked-out pixels stay at 0.
            fm[px] = 0
            continue
        key = (PDs[px], T1s[px], T2s[px])
        if key not in response_cache:
            response_cache[key] = get_df_responses(
                T1s[px], T2s[px], PDs[px], TR, alpha, phase_cyc, dfs)
        # Pick the off-resonance whose simulated response is closest to the
        # measured signal at this pixel.
        nearest, _ = find_nearest(response_cache[key], Mxys[px])
        fm[px] = dfs[nearest]
    return fm.reshape(out_shape)
|
[
"nicholas.bgp@gmail.com"
] |
nicholas.bgp@gmail.com
|
a4e0c192f3c8f4463eae05876b00114d00ab91c7
|
8ce23f191870868c86c7616882e6043b1102cb0d
|
/tools/text_processing/join_files_on_column_fuzzy/join_files_on_column_fuzzy.py
|
1e19f1dcfe4a4d3ab0743078894f5c196b0b2559
|
[] |
no_license
|
StevenVerbruggen/galaxytools
|
56f99d0d629cb6d9e3db290c64f30b920de04f26
|
7d7365197e2cba2eb048121c9f0ee5546f06c520
|
refs/heads/master
| 2021-01-16T17:51:39.721403
| 2020-12-01T08:35:51
| 2020-12-01T08:35:51
| 100,017,016
| 0
| 0
| null | 2017-08-11T09:42:20
| 2017-08-11T09:42:20
| null |
UTF-8
|
Python
| false
| false
| 4,755
|
py
|
#!/usr/bin/env python
import os
import argparse
import sys
def main(args):
    """Fuzzy-merge rows of args.f1 and args.f2 on a numeric column.

    File 1 is streamed line by line; a rolling cache of file 2 rows is kept
    and every cached row whose key lies within args.distance (absolute or
    ppm) of the current file-1 key is joined and written to args.outfile.
    Assumes both files are sorted on the join column.
    """
    if args.header:
        h1 = True
        h2 = True
    else:
        h1 = False
        h2 = False
    cache = list()
    out = open(args.outfile, 'w+')
    write_buffer = list()

    def _readline(header = False):
        # Generator over file 2: yields the raw header line first (when
        # requested), then (columns, key-as-float) tuples for data lines.
        with open(args.f2) as handle2:
            for line in handle2:
                line = line.strip()
                if header:
                    header = False
                    yield line
                    continue
                if not line:
                    continue
                columns = line.split(args.sep)
                value2 = columns[args.c2-1]
                yield columns, float(value2)

    def fill_cache():
        # Pull one more file-2 row into the cache; silently stops at EOF.
        try:
            cache.append(next(it))
        except StopIteration:
            pass

    it = _readline(header = h2)
    with open(args.f1) as handle1:
        for line in handle1:
            line = line.strip()
            if h1:
                # First line of file 1: emit the combined header row.
                h1 = False
                seconda_header = next(it)
                if args.add_distance:
                    out.write('%s\t%s\t%s\n' % (line, seconda_header, args.unit))
                else:
                    out.write('%s\t%s\n' % (line, seconda_header))
                continue
            if not line:
                continue
            columns = line.split(args.sep)
            value1 = float(columns[args.c1-1])
            # _cache collects the rows to carry over to the next file-1 line.
            _cache = list()
            fill_cache()
            while cache:
                _c, value2 = cache.pop(0)
                upper_bound = value1 + args.distance
                if args.unit == 'absolute':
                    if value2 <= upper_bound and value2 >= (value1 - args.distance):
                        # Within the window: record the match and keep the row
                        # for the next file-1 value too.
                        line_template = '%s\n'
                        abs_dist = abs(value1 - value2)
                        if args.add_distance:
                            line_template = '%s\t' + str(abs_dist) + '\n'
                        write_buffer.append([abs_dist, line_template % '\t'.join( columns + _c )])
                        _cache.append([_c, value2])
                        fill_cache()
                    elif value2 > upper_bound:
                        # if the value from list 2 is bigger then the current value, he will be taken into the next round
                        _cache.append([_c, value2])
                    elif value2 < upper_bound:
                        # if the value from list 2 is smaller then the currecnt value, check the next one of list 2
                        fill_cache()
                elif args.unit == 'ppm':
                    ppm_dist = abs((value1 - value2) / value1 * 1000000)
                    if ppm_dist <= args.distance:
                        line_template = '%s\n'
                        if args.add_distance:
                            line_template = '%s\t' + str(ppm_dist) + '\n'
                        write_buffer.append([ppm_dist, line_template % '\t'.join( columns + _c )])
                        _cache.append([_c, value2])
                        fill_cache()
                    elif ppm_dist > args.distance:
                        _cache.append([_c, value2])
                    elif ppm_dist < args.distance:
                        # NOTE(review): unreachable -- the two branches above
                        # already cover every ppm_dist value, and unlike the
                        # 'absolute' arm this never advances file 2 for rows
                        # far below value1; verify ppm mode on real data.
                        fill_cache()
            if args.closest and write_buffer:
                # Keep only the single nearest match for this file-1 row.
                write_buffer.sort(key=lambda x: x[0])
                out.write(write_buffer[0][1])
            else:
                for _dist, line in write_buffer:
                    out.write(line)
            write_buffer = list()
            cache = _cache
    out.close()
# Command-line interface; see the individual help strings for parameter details.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Merge two files on a common column the fuzzy way.')
    parser.add_argument('--f1', required=True)
    parser.add_argument('--f2', required=True)
    parser.add_argument('--c1', type=int, required=True, help="Column in file 1 to be merged on.")
    parser.add_argument('--c2', type=int, required=True, help="Column in file 2 to be merged on.")
    parser.add_argument('--outfile', required=True)
    parser.add_argument('--header', action='store_true', help="The files have a header line at the beginning.")
    parser.add_argument('--closest', action='store_true', help="Only report the closest match.")
    parser.add_argument('--add_distance', action='store_true', help="Add addional column with the distance between the two values.")
    parser.add_argument('--sep', type=str, default="\t", help="Files are separated by this separator.")
    parser.add_argument('--distance', type=float, default="0.2", help="Maximal allowed distance.")
    parser.add_argument('--unit', choices=['ppm', 'absolute'], default='absolute')
    args = parser.parse_args()
    main(args)
|
[
"bjoern.gruening@gmail.com"
] |
bjoern.gruening@gmail.com
|
b897b084b288350d1a287661007953393d395943
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/BprimeBprime/BprimeBprimeToBHBHinc_M_800_TuneZ2star_8TeV_madgraph_cff.py
|
352c4947ebcf1ce31ccf35f0dd2e24c3165cb26a
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,231
|
py
|
import FWCore.ParameterSet.Config as cms

#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *

# Pythia6 hadronizer configuration for B'B' -> bH bH at 8 TeV with an
# 800 GeV b' mass; decay tables below force b' -> h0 b and open all Higgs
# decay channels.
generator = cms.EDFilter("Pythia6HadronizerFilter",
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    comEnergy = cms.double(8000.0),
    PythiaParameters = cms.PSet(
        pythiaUESettingsBlock,
        processParameters = cms.vstring(
            'PMAS(25,1)=125.00D0 !mass of Higgs',
            'MSTP(1) = 4',
            'MSEL=7 ! User defined processes',
            'MWID(7)=2',
            'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
            'MSTP(61)=1 ! Parton showering on or off',
            'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
            'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
            'PMAS(7,1) = 800.0D0 ! bprime quarks mass',
            'PMAS(7,2) = 8.000D0 ! bprime quark width',
            'PMAS(7,3) = 80.00D0 ! Max value above which the BW shape is truncated',
            'VCKM(1,1) = 0.97414000D0',
            'VCKM(1,2) = 0.22450000D0',
            'VCKM(1,3) = 0.00420000D0',
            'VCKM(1,4) = 0.02500000D0',
            'VCKM(2,1) = 0.22560000D0',
            'VCKM(2,2) = 0.97170000D0',
            'VCKM(2,3) = 0.04109000D0',
            'VCKM(2,4) = 0.05700000D0',
            'VCKM(3,1) = 0.00100000D0',
            'VCKM(3,2) = 0.06200000D0',
            'VCKM(3,3) = 0.91000000D0',
            'VCKM(3,4) = 0.41000000D0',
            'VCKM(4,1) = 0.01300000D0',
            'VCKM(4,2) = 0.04000000D0',
            'VCKM(4,3) = 0.41000000D0',
            'VCKM(4,4) = 0.91000000D0',
            'MDME(56,1)=0 ! g b4',
            'MDME(57,1)=0 ! gamma b4',
            'MDME(58,1)=0 ! Z0 b',
            'MDME(59,1)=0 ! W u',
            'MDME(60,1)=0 ! W c',
            'MDME(61,1)=0 ! W t',
            'MDME(62,1)=0 ! W t4',
            'KFDP(63,2)=5 ! defines H0 b',
            'MDME(63,1)=1 ! h0 b4',
            'MDME(64,1)=-1 ! H- c',
            'MDME(65,1)=-1 ! H- t',
            'BRAT(56) = 0.0D0',
            'BRAT(57) = 0.0D0',
            'BRAT(58) = 0.0D0',
            'BRAT(59) = 0.0D0',
            'BRAT(60) = 0.0D0',
            'BRAT(61) = 0.0D0',
            'BRAT(62) = 0.0D0',
            'BRAT(63) = 1.0D0',
            'BRAT(64) = 0.0D0',
            'BRAT(65) = 0.0D0',
            'MDME(210,1)=1 !Higgs decay into dd',
            'MDME(211,1)=1 !Higgs decay into uu',
            'MDME(212,1)=1 !Higgs decay into ss',
            'MDME(213,1)=1 !Higgs decay into cc',
            'MDME(214,1)=1 !Higgs decay into bb',
            'MDME(215,1)=1 !Higgs decay into tt',
            'MDME(216,1)=1 !Higgs decay into',
            'MDME(217,1)=1 !Higgs decay into Higgs decay',
            'MDME(218,1)=1 !Higgs decay into e nu e',
            'MDME(219,1)=1 !Higgs decay into mu nu mu',
            'MDME(220,1)=1 !Higgs decay into tau nu tau',
            'MDME(221,1)=1 !Higgs decay into Higgs decay',
            'MDME(222,1)=1 !Higgs decay into g g',
            'MDME(223,1)=1 !Higgs decay into gam gam',
            'MDME(224,1)=1 !Higgs decay into gam Z',
            'MDME(225,1)=1 !Higgs decay into Z Z',
            'MDME(226,1)=1 !Higgs decay into W W',
        ),
        # This is a vector of ParameterSet names to be read, in this order
        parameterSets = cms.vstring('pythiaUESettings',
            'processParameters')
    ),
    # MLM-style matching of Madgraph matrix-element jets to the parton shower.
    jetMatching = cms.untracked.PSet(
        scheme = cms.string("Madgraph"),
        mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
        MEMAIN_etaclmax = cms.double(5.0),
        MEMAIN_qcut = cms.double(-1),
        MEMAIN_nqmatch = cms.int32(-1),
        MEMAIN_minjets = cms.int32(-1),
        MEMAIN_maxjets = cms.int32(-1),
        MEMAIN_showerkt = cms.double(0),
        MEMAIN_excres = cms.string(''),
        outTree_flag = cms.int32(0)
    )
)

# Sequence consumed by the production configuration.
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] |
sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch
|
41ef33c1c1af378a664ea82f485c5a12ebeedd1c
|
a0fb29f99a852089193e4cc9a11e7263dc3f8b5f
|
/mayan/apps/metadata/literals.py
|
aba1309e370f89d0f6259a24ca393df9dc3e1f1c
|
[
"Apache-2.0"
] |
permissive
|
ikang9712/Mayan-EDMS
|
0e22a944d63657cea59c78023b604a01a622b52a
|
d6e57e27a89805329fe0c5582caa8e17882d94e6
|
refs/heads/master
| 2023-07-28T19:41:55.269513
| 2021-09-07T14:16:14
| 2021-09-07T14:16:14
| 402,884,683
| 1
| 0
|
NOASSERTION
| 2021-09-03T20:00:09
| 2021-09-03T20:00:09
| null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from .parsers import MetadataParser
from .validators import MetadataValidator

# Default registries of available validator/parser classes, expressed as the
# import paths returned by each base class's get_import_paths().
DEFAULT_METADATA_AVAILABLE_VALIDATORS = MetadataValidator.get_import_paths()
DEFAULT_METADATA_AVAILABLE_PARSERS = MetadataParser.get_import_paths()
|
[
"roberto.rosario@mayan-edms.com"
] |
roberto.rosario@mayan-edms.com
|
c93ba3313bf6c3ee32e36cad9c787f55c5d4548b
|
8395ffb48750359d1bd51a201a41c7fe124998bc
|
/apc2015/perception/single_utils/src/generate_naive_cloud.py
|
4195bb9783faaf79d4485ed09ada91429266c3d6
|
[] |
no_license
|
duke-iml/ece490-s2016
|
ab6c3d3fb159a28a9c38487cdb1ad3993008b854
|
f9cc992fbaadedc8a69678ba39f0c9d108e6910d
|
refs/heads/master
| 2020-04-12T09:03:56.601000
| 2016-11-29T21:36:48
| 2016-11-29T21:36:48
| 49,226,568
| 2
| 6
| null | 2016-11-29T21:36:49
| 2016-01-07T19:42:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,659
|
py
|
#!/usr/bin/env python
from __future__ import division
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import subprocess
import time
import psutil
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
from common_utils import *
from math import pi, sin, cos, tan, atan, sqrt
# Handle of the RealSense emitter subprocess (killed once both frames arrive).
pid = None
# Output base name for the .pcd/.bmp files (set in main()).
file_name = None
# Latest frames received from the ROS topics; None until first message.
rgb_mat = None
depth_mat = None
bridge = CvBridge()
# Depth image dimensions (the RGB image is 2x this size -- see process()).
w = 320
h = 240
# Diagonal field of view in radians, and the focal length in pixels ("lift")
# derived from it via the pinhole model.
diag_ang = 74/180*pi
diag = sqrt(w**2+h**2)
lift = diag/2 / tan(diag_ang/2)
def receive_rgb(data):
    """ROS callback: cache the latest RGB frame; build the cloud once both
    an RGB and a depth frame have been received."""
    global rgb_mat
    rgb_mat = bridge.imgmsg_to_cv2(data, "bgr8")
    if depth_mat is not None:
        process()
def receive_depth(data):
    """ROS callback: cache the latest depth frame (16-bit, single channel)."""
    global depth_mat
    depth_mat = bridge.imgmsg_to_cv2(data, "mono16")
    # Keep only the first channel of the converted image.
    depth_mat = depth_mat[:,:,0]
    if rgb_mat is not None:
        process()
def process():
    """Build a colored point cloud from the captured RGB and depth frames.

    Kills the RealSense emitter subprocess, dumps both raw frames to disk,
    back-projects each valid depth pixel through the pinhole model, writes
    the cloud to <file_name> as a PCD file, then shuts the ROS node down.
    """
    # Both frames are in; the emitter subprocess is no longer needed.
    psutil.Process(pid).kill()
    cv2.imwrite(file_name+".bmp", rgb_mat)
    cv2.imwrite(file_name+".depth.bmp", depth_mat)
    assert depth_mat.shape == (h, w)
    point_cloud = []
    for i in range(h):
        for j in range(w):
            depth = depth_mat[i, j]
            # The RGB image is captured at twice the depth resolution, so
            # each depth pixel maps onto a 2x2 patch of color pixels.
            b1, g1, r1 = list(rgb_mat[i*2, j*2, :].flatten())
            b2, g2, r2 = list(rgb_mat[i*2+1, j*2, :].flatten())
            b3, g3, r3 = list(rgb_mat[i*2, j*2+1, :].flatten())
            b4, g4, r4 = list(rgb_mat[i*2+1, j*2+1, :].flatten())
            # Promote to int before summing so uint8 arithmetic cannot wrap.
            b1 = int(b1)
            b2 = int(b2)
            b3 = int(b3)
            b4 = int(b4)
            g1 = int(g1)
            g2 = int(g2)
            g3 = int(g3)
            g4 = int(g4)
            r1 = int(r1)
            r2 = int(r2)
            r3 = int(r3)
            r4 = int(r4)
            r = int((r1+r2+r3+r4)/4)
            g = int((g1+g2+g3+g4)/4)
            b = int((b1+b2+b3+b4)/4)
            # Bug fix: the 2x2 average above was computed but the original
            # packed only the top-left pixel (r1, g1, b1); use the average.
            rgb = rgb_to_pcl_float(r, g, b)
            # 32001 is skipped as invalid -- presumably the sensor's
            # "no reading" sentinel; confirm against the emitter source.
            if depth==32001:
                continue
            assert depth<20000
            # Pinhole back-projection: pixel offset from the optical center,
            # scaled by depth over the focal length; output in meters.
            coord = (j+0.5-w/2, i+0.5-h/2)
            real_x = coord[0]/lift*depth
            real_y = coord[1]/lift*depth
            point_cloud.append([real_x/1000, real_y/1000, depth/1000, rgb])
    write_pcd_file(point_cloud, file_name)
    rospy.signal_shutdown("Point cloud made, shutting down...\n")
def main():
    """Launch the RealSense emitter, subscribe to its topics, and spin."""
    global file_name
    # Optional first CLI argument overrides the output file name.
    if len(sys.argv)>=2:
        file_name = sys.argv[1]
    else:
        file_name = 'point_cloud.pcd'
    global pid
    # Start the emitter binary; its pid is kept so process() can kill it
    # once one RGB + depth pair has been captured.
    process = subprocess.Popen('hardware_layer/RealSense_ROS_Emitter', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    pid = process.pid
    # Give the emitter time to come up -- TODO confirm 3 s is enough.
    time.sleep(3)
    rospy.init_node('naive_point_cloud', disable_signals=True)
    rgb_sub = rospy.Subscriber("/realsense/rgb", Image, receive_rgb, queue_size=1)
    depth_sub = rospy.Subscriber("/realsense/depth", Image, receive_depth, queue_size=1)
    rospy.spin()
if __name__ == '__main__':
main()
|
[
"hauser.kris@gmail.com"
] |
hauser.kris@gmail.com
|
1ffc8b3649921a8cf943112df31655726ca74210
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/143/usersdata/210/62277/submittedfiles/av2_p3_m2.py
|
26e0d4120d107718e01aaba735ca255a96ae8f9d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# -*- coding: utf-8 -*-
def degrais(a):
    """Return the largest absolute difference between consecutive elements.

    Sequences with fewer than two elements yield 0.
    """
    return max((abs(x - y) for x, y in zip(a, a[1:])), default=0)
# Read the element count, then each value, and report the largest
# rise/drop between consecutive values.
h=int(input('digite o valor de h:'))
j=[]
for i in range(0,h,1):
    numero=int(input('digite o numero:'))
    j.append(numero)
x=degrais(j)
print(x)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1ba87cd411f46c264b9fd8759ef716c3d9e27938
|
c06efd90533c51c2b29b7e92cd13723388de25ee
|
/actions/patchStorageV1beta1StorageClass.py
|
a57bbce258efa5ad9e6ef149ec1d897e8648932f
|
[] |
no_license
|
ajohnstone/stackstorm-kubernetes
|
490e4a73daad3713d7c5b5b639d5f30ff1ab3e58
|
99ffad27f5947583a2ab1b56e80c06003d014c47
|
refs/heads/master
| 2021-01-11T23:29:49.642435
| 2016-12-07T13:20:34
| 2016-12-07T13:20:34
| 78,588,572
| 0
| 0
| null | 2017-01-11T00:48:59
| 2017-01-11T00:48:59
| null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
from lib import k8s
from st2actions.runners.pythonrunner import Action
class patchStorageV1beta1StorageClass(Action):
    """StackStorm action wrapping the Kubernetes patchStorageV1beta1StorageClass call."""

    def run(self, body, name, config_override=None, pretty=None):
        # Build the client first (mirrors the original call order), then
        # validate the required parameters before dispatching.
        client = k8s.K8sClient(self.config)

        if body is None:
            return (False, "body is a required parameter")
        if name is None:
            return (False, "name is a required parameter")

        call_args = {'body': body, 'name': name}
        if config_override is not None:
            call_args['config_override'] = config_override
        if pretty is not None:
            call_args['pretty'] = pretty
        return (True, client.runAction('patchStorageV1beta1StorageClass', **call_args))
|
[
"andy@impulsed.net"
] |
andy@impulsed.net
|
27e3a773e1f3b1c7193ce9a831b0b54a38653ad7
|
cf5f24e5a32f8cafe90d4253d727b1c0457da6a4
|
/algorithm/BOJ_1629.py
|
11a30af639ff558eb56b49660735d2acd32acf3e
|
[] |
no_license
|
seoljeongwoo/learn
|
537659ca942875f6846646c2e21e1e9f2e5b811e
|
5b423e475c8f2bc47cb6dee09b8961d83ab08568
|
refs/heads/main
| 2023-05-04T18:07:27.592058
| 2021-05-05T17:32:50
| 2021-05-05T17:32:50
| 324,725,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# import sys
# input = sys.stdin.readline
# A,B,C=map(int,input().split())
# def solve(a,b):
# if b==1: return a
# ret = solve(a,b//2)%C
# ret = (ret*ret)%C
# if b%2==1: ret = (ret*a)%C
# return ret
# print(solve(A,B)%C)
print(pow(*map(int,input().split())))
|
[
"noreply@github.com"
] |
seoljeongwoo.noreply@github.com
|
df760f3fb2bae9441d342cf168781c8ce3d3cf92
|
261fa6004234ccae2b1a4ff455ae54aefecbb172
|
/ui_extensions/content_export/views.py
|
cc9e021e8399ec531eb798666ee498596ae79847
|
[
"Apache-2.0"
] |
permissive
|
svang001/cloudbolt-forge
|
671575eecd54e1207b7dde144db2fdb6c43c9ddf
|
3796900115876f8a9ee333b75f45e3d60d7705d7
|
refs/heads/master
| 2023-02-23T23:03:33.225739
| 2021-01-19T20:09:21
| 2021-01-19T20:09:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,587
|
py
|
import requests
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.html import mark_safe
from cbhooks.models import (
HookPointAction, RecurringActionJob, ServerAction, ResourceAction, TriggerPoint
)
from extensions.models import UIExtension, XUIIndexer
from extensions.views import admin_extension
from servicecatalog.models import ServiceBlueprint
from utilities.decorators import dialog_view
from utilities.permissions import cbadmin_required
from xui.content_export.forms import ExportContentForm
@admin_extension(title='Exportable Contents', description='All Exportable CloudBolt Contents')
@cbadmin_required
def export_content_list(request):
    """
    View for listing metadata for all exportable contents.

    Fetches the exportable-content API payload from this host and flattens
    every supported collection into a single list for the template.
    """
    proto = request.META['wsgi.url_scheme']
    host = request.META['HTTP_HOST']
    # NOTE(review): verify=False disables TLS certificate checking on this
    # loopback request -- confirm this is intentional.
    resp = requests.get('{}://{}/api/v2/exportable-content/?version=dev'.format(proto, host), verify=False)
    response = resp.json()

    from api.v2.serializers import keys_hyphens_to_underscores

    def _normalize(item, collection):
        # The content id is the second-to-last path segment of the package URL.
        item['id'] = item['package-url'].split('/')[-2]
        item['collections'] = collection
        return keys_hyphens_to_underscores(item)

    exportable_contents = []
    # All collections except UI extensions share the same normalization;
    # order matches the original hand-unrolled version.
    for collection in ['server-actions', 'orchestration-actions']:
        for item in response.get(collection, []):
            exportable_contents.append(_normalize(item, collection))

    if 'ui-extension-packages' in response:
        # UI extensions carry their name (not id) in the URL, so the id must
        # be resolved through the freshly indexed UIExtension table.
        XUIIndexer().index()
        for ui in response['ui-extension-packages']:
            ext_name = ui['package-url'].split('/')[-1]
            ui['id'] = UIExtension.objects.get(name=ext_name).id
            ui['collections'] = 'ui-extension-packages'
            exportable_contents.append(keys_hyphens_to_underscores(ui))

    for collection in ['blueprints', 'recurring-jobs', 'resource-actions']:
        for item in response.get(collection, []):
            exportable_contents.append(_normalize(item, collection))

    list_context = {
        'exportable_contents': exportable_contents,
        'pagetitle': 'Exportable Contents',
    }
    return render(request, 'content_export/templates/list.html', list_context)
@dialog_view
@cbadmin_required
def export_content_edit(request, id=None, collections=''):
"""
Edit exportable contents
"""
if collections == 'blueprints':
instance = ServiceBlueprint.objects.get(id=id)
elif collections == 'resource-actions':
instance = ResourceAction.objects.get(id=id)
elif collections == 'server-actions':
instance = ServerAction.objects.get(id=id)
elif collections == 'recurring-jobs':
instance = RecurringActionJob.objects.get(id=id)
elif collections == 'orchestration-actions':
instance = HookPointAction.objects.get(id=id)
elif collections == 'ui-extension-packages':
instance = UIExtension.objects.get(id=id)
if request.method == 'POST':
form = ExportContentForm(request.POST, request.FILES, instance=instance)
if form.is_valid():
instance = form.save()
msg = "Metadata details for {} have been saved.".format(instance)
messages.success(request, msg)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
else:
form = ExportContentForm(instance=instance)
return {
'title': 'Edit Exportable Metadata',
'form': form,
'action_url': reverse('export_content_edit', args=[id, collections]),
'use_ajax': True,
'submit': 'Save',
'extra_onready_js': mark_safe("$('.render_as_datepicker').datepicker({dateFormat: 'yy-mm-dd'});")
}
|
[
"klaratta@cloudboltsoftware.com"
] |
klaratta@cloudboltsoftware.com
|
490fcdfb16141de4f142150d27b614173af087da
|
2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4
|
/giico/quality_control_and_material_testing/doctype/bulk_density_of_masonary/bulk_density_of_masonary.py
|
682281f533740a8c16ef57cb3acb6c2e523d8ca2
|
[
"MIT"
] |
permissive
|
thispl/giico
|
b96cf6b707f361275f8723d15f8ea1f95f908c9c
|
14c5631639ab56a586a7962be9871d722c20e205
|
refs/heads/master
| 2021-06-18T03:56:02.928303
| 2021-04-27T06:42:59
| 2021-04-27T06:42:59
| 200,183,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class BulkDensityOfmasonary(Document):
pass
|
[
"hereabdulla@gmail.com"
] |
hereabdulla@gmail.com
|
56ab994254b3b1de4c46198dd4067152d1c0b8b9
|
47703c8cfd6b6cbbec7ceb2509da1bc049dd621f
|
/udoy_013.py
|
de28dafdc46a3bbd08f2137b5bbcbf693cf22f3f
|
[] |
no_license
|
udoy382/PyCode
|
0638a646bd4cac4095a58135aea97ba4ccfd5535
|
69efde580f019cd41061501554b6193688a0a06f
|
refs/heads/main
| 2023-03-26T17:45:15.943887
| 2021-03-25T14:22:42
| 2021-03-25T14:22:42
| 324,485,735
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
# Short Hand If Else Notation In Python #22
a = int(input("enter a\n"))
b = int(input("enter b\n"))
# 1st
# if a>b: print("A B se bada hai bhai")
# 2nd
# print("B A se bada hai bhai") if a<b else print("A B se bada hai bhai")
|
[
"srudoy436@gmail.com"
] |
srudoy436@gmail.com
|
acdf56c82a6bb37ed814ba0a5223a77421137d5c
|
ef78bd58d61002f45778a40da7759ed0b1998cd3
|
/code/transforms/univariategaussianization.py
|
85eb0ed34aec6c919cee82f5578985a62cf4bd41
|
[
"MIT"
] |
permissive
|
afcarl/isa
|
61e85c0c790c7cc357e0c29fc5bda948e9c77ce4
|
f0497c0cc7bd72e0de7f4f9a8da40e214c22abe9
|
refs/heads/master
| 2020-03-19T21:36:06.716167
| 2013-01-28T18:32:30
| 2013-01-28T18:32:30
| 136,944,562
| 1
| 0
| null | 2018-06-11T15:20:45
| 2018-06-11T15:20:44
| null |
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from scipy.special import erf, erfinv
from scipy.stats import norm
from scipy.optimize import bisect
from numpy import mean, sqrt, asarray, max, min, any
from transforms import Transform
import pdb
class UnivariateGaussianization(Transform):
def __init__(self, mog):
self.mog = mog
def apply(self, data):
# make sure data has right shape
data = asarray(data).reshape(1, -1)
# apply model CDF
data = self.mog.cdf(data)
# apply inverse Gaussian CDF
result = erfinv(data * 2. - 1.)
result[result > 6.] = 6.
result[result < -6.] = -6.
return result * sqrt(2.)
def inverse(self, data, max_iter=100):
# make sure data has right shape
data = asarray(data).reshape(1, -1)
# apply Gaussian CDF
data = norm.cdf(data)
# apply inverse model CDF
val_max = mean(self.mog.means) + 1.
val_min = mean(self.mog.means) - 1.
for t in range(data.shape[1]):
# make sure root lies between val_min and val_max
while float(self.mog.cdf(val_min)) > data[0, t]:
val_min -= 1.
while float(self.mog.cdf(val_max)) < data[0, t]:
val_max += 1.
# find root numerically
data[0, t] = bisect(
f=lambda x: float(self.mog.cdf(x)) - data[0, t],
a=val_min,
b=val_max,
maxiter=max_iter,
disp=False)
return data
def logjacobian(self, data):
# make sure data has right shape
data = asarray(data).reshape(1, -1)
data_ug = self.apply(data)
return self.mog.loglikelihood(data) - norm.logpdf(data_ug)
|
[
"lucas@theis.io"
] |
lucas@theis.io
|
b6415094da921188a6c07160bf88440442a8f16d
|
049e2fab5e9e8f248e537cbada15d60d60536990
|
/environment/env_multi.py
|
be5637dedf9dd6ef8320973bbc255ebc9740da5c
|
[
"MIT"
] |
permissive
|
RubenPants/RobotSimulator2D
|
adfd8c16ec48b34419cae096d16e5e6714410407
|
334d7b9cab0edb22d4670cfaf39fbed76c351758
|
refs/heads/master
| 2023-05-14T20:09:44.604695
| 2020-07-11T14:16:58
| 2020-07-11T14:16:58
| 223,198,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,070
|
py
|
"""
env_multi.py
Environment where a single genome gets evaluated over multiple games. This environment will be called in a process.
"""
import sys
from config import Config
from environment.entities.game import get_game
from population.population import Population
from utils.dictionary import D_DONE, D_SENSOR_LIST
class MultiEnvironment:
""" This class provides an environment to evaluate a single genome on multiple games. """
__slots__ = {
"batch_size", "game_config", "games", "make_net", "max_steps", "pop_config", "query_net",
}
def __init__(self,
make_net,
query_net,
game_config: Config,
pop_config: Config,
):
"""
Create an environment in which the genomes get evaluated across different games.
:param make_net: Method to create a network based on the given genome
:param query_net: Method to evaluate the network given the current state
:param game_config: Config file for game-creation
:param pop_config: Config file specifying how genome's network will be made
"""
self.batch_size = 0
self.games = []
self.make_net = make_net
self.max_steps = game_config.game.duration * game_config.game.fps
self.query_net = query_net
self.game_config = game_config
self.pop_config = pop_config
def eval_genome(self,
genome,
return_dict=None,
):
"""
Evaluate a single genome in a pre-defined game-environment.
:param genome: Tuple (genome_id, genome_class)
:param return_dict: Dictionary used to return observations corresponding the genome
"""
# Split up genome by id and genome itself
genome_id, genome = genome
used_connections = set(genome.get_used_connections().keys())
# Initialize the games on which the genome is tested
games = [get_game(g, cfg=self.game_config) for g in self.games]
for g in games: g.player.set_active_sensors(used_connections) # Set active-sensors
# Ask for each of the games the starting-state
states = [g.reset()[D_SENSOR_LIST] for g in games]
# Finished-state for each of the games is set to false
finished = [False] * self.batch_size
# Create the network used to query on, initialize it with the first-game's readings (good approximation)
net = self.make_net(genome=genome,
genome_config=self.pop_config.genome,
game_config=self.game_config,
bs=self.batch_size,
initial_read=states[0],
)
# Start iterating the environments
step_num = 0
while True:
# Check if maximum iterations is reached
if step_num == self.max_steps: break
# Determine the actions made by the agent for each of the states
actions = self.query_net(net, states)
# Check if each game received an action
assert len(actions) == len(games)
for i, (g, a, f) in enumerate(zip(games, actions, finished)):
# Ignore if game has finished
if not f:
# Proceed the game with one step, based on the predicted action
obs = g.step(l=a[0], r=a[1])
finished[i] = obs[D_DONE]
# Update the candidate's current state
states[i] = obs[D_SENSOR_LIST]
# Stop if agent reached target in all the games
if all(finished): break
step_num += 1
# Return the final observations
if return_dict is not None: return_dict[genome_id] = [g.close() for g in games]
def trace_genome(self,
genome,
return_dict=None,
):
"""
Get the trace of a single genome for a pre-defined game-environment.
:param genome: Tuple (genome_id, genome_class)
:param return_dict: Dictionary used to return the traces corresponding the genome-game combination
"""
# Split up genome by id and genome itself
genome_id, genome = genome
used_connections = set(genome.get_used_connections().keys())
# Initialize the games on which the genome is tested
games = [get_game(g, cfg=self.game_config) for g in self.games]
for g in games: g.player.set_active_sensors(used_connections) # Set active-sensors
# Ask for each of the games the starting-state
states = [g.reset()[D_SENSOR_LIST] for g in games]
# Initialize the traces
traces = [[g.player.pos.get_tuple()] for g in games]
# Finished-state for each of the games is set to false
finished = [False] * self.batch_size
# Create the network used to query on, initialize it with the first-game's readings (good approximation)
net = self.make_net(genome=genome,
genome_config=self.pop_config.genome,
game_config=self.game_config,
bs=self.batch_size,
initial_read=states[0],
)
# Start iterating the environments
step_num = 0
while True:
# Check if maximum iterations is reached
if step_num == self.max_steps: break
# Determine the actions made by the agent for each of the states
actions = self.query_net(net, states)
# Check if each game received an action
assert len(actions) == len(games)
for i, (g, a, f) in enumerate(zip(games, actions, finished)):
# Do not advance the player if target is reached
if f:
traces.append(g.player.pos.get_tuple())
continue
# Proceed the game with one step, based on the predicted action
obs = g.step(l=a[0], r=a[1])
finished[i] = obs[D_DONE]
# Update the candidate's current state
states[i] = obs[D_SENSOR_LIST]
# Update the trace
traces[i].append(g.player.pos.get_tuple())
# Next step
step_num += 1
# Return the final observations
if return_dict is not None: return_dict[genome_id] = traces
# -----------------------------------------------> HELPER METHODS <----------------------------------------------- #
def set_games(self, games):
"""
Set the games-set with new games.
:param games: List of Game-IDs
"""
self.games = games
self.batch_size = len(games)
def get_game_params(self):
"""Return list of all game-parameters currently in self.games."""
return [get_game(i, cfg=self.game_config).game_params() for i in self.games]
def get_multi_env(pop: Population, game_config: Config):
"""Create a multi-environment used to evaluate a population on."""
if sys.platform == 'linux':
from environment.cy.env_multi_cy import MultiEnvironmentCy
return MultiEnvironmentCy(
make_net=pop.make_net,
query_net=pop.query_net,
game_config=game_config,
pop_config=pop.config,
)
else:
return MultiEnvironment(
make_net=pop.make_net,
query_net=pop.query_net,
game_config=game_config,
pop_config=pop.config,
)
|
[
"broekxruben@gmail.com"
] |
broekxruben@gmail.com
|
45b817d4a75f46e4e626eb9c9fb88a7376806c4e
|
0805420ce1890c36aa9e0cc1a782945464433ef6
|
/client/videoplayer/__init__.py
|
b55a1624352086133c05f65c066095386a59df16
|
[] |
no_license
|
cnrat/dec-eve-serenity
|
4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c
|
37519e66a5fbb0d7c417d5cf9778636991efbed8
|
refs/heads/master
| 2021-01-21T03:39:48.969227
| 2016-08-10T05:25:07
| 2016-08-10T05:25:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\videoplayer\__init__.py
from _videoplayer import *
|
[
"victorique.de.blois@asu.edu"
] |
victorique.de.blois@asu.edu
|
46bf955e07557ee8530320380cf68eb939581578
|
227539d0906cdfbb7cd19f16599c35d5bd09abfd
|
/Stepik_Adaptive_Python/adaptive-python-en-master/Step 070 Riddle.py
|
5762e4cba5be9ed1142cc7c9eba781abb385451a
|
[] |
no_license
|
solomonli/PycharmProjects
|
cceb92a11ec1f9e7fef25bca552d8264c75228a0
|
31673627487db1370424f5b0aeee3e20bb23b47a
|
refs/heads/master
| 2021-06-24T11:59:36.365496
| 2019-07-08T09:53:18
| 2019-07-08T09:53:18
| 148,558,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
riddle = '''
{0} and {1} sat in the tree.
{0} had fallen, {1} was stolen.
What's remaining in the tree?
'''
print(riddle.format(input(), input()))
|
[
"richdad.solomon@gmail.com"
] |
richdad.solomon@gmail.com
|
4a13ba1319edbfe715b0595a65cffb4119942d5b
|
b84c89d0ade21bf8c2df9d0cf8f94d7a27c2824b
|
/test/integration/test_cursor.py
|
fc9dc209577a61eeb75a497eb6aa8552833b627a
|
[
"Apache-2.0"
] |
permissive
|
srlabUsask/py2neo
|
931b06678561201d56a36ec10da7ad4614ab6c87
|
80d3cf1ab0b4cfb03b7824fd7a407b33c95a1e8f
|
refs/heads/master
| 2022-11-16T21:17:42.319698
| 2020-07-12T23:00:29
| 2020-07-12T23:00:29
| 279,281,481
| 0
| 0
|
Apache-2.0
| 2020-07-13T11:17:53
| 2020-07-13T11:17:50
| null |
UTF-8
|
Python
| false
| false
| 5,453
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytest import raises
from py2neo import Record, Subgraph
def test_cannot_move_beyond_end(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward()
assert not cursor.forward()
def test_can_only_move_until_end(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward(2) == 1
def test_moving_by_zero_keeps_same_position(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward(0) == 0
def test_keys_are_populated_before_moving(graph):
cursor = graph.run("RETURN 1 AS n")
assert list(cursor.keys()) == ["n"]
def test_keys_are_populated_after_moving(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n")
n = 0
while cursor.forward():
n += 1
assert list(cursor.keys()) == ["n"]
def test_keys_are_populated_before_moving_within_a_transaction(graph):
with graph.begin() as tx:
cursor = tx.run("RETURN 1 AS n")
assert list(cursor.keys()) == ["n"]
def test_stats_available(graph):
cursor = graph.run("CREATE (a:Banana)")
stats = cursor.stats()
assert stats["nodes_created"] == 1
assert stats["labels_added"] == 1
assert stats["contained_updates"] == 1
def test_current_is_none_at_start(graph):
cursor = graph.run("RETURN 1")
assert cursor.current is None
def test_current_updates_after_move(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n")
n = 0
while cursor.forward():
n += 1
assert cursor.current == Record(zip(["n"], [n]))
def test_select_picks_next(graph):
cursor = graph.run("RETURN 1")
record = next(cursor)
assert record == Record(zip(["1"], [1]))
def test_cannot_select_past_end(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
with raises(StopIteration):
_ = next(cursor)
def test_selection_triggers_move(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
for i in range(1, 11):
n, n_sq = next(cursor)
assert n == i
assert n_sq == i * i
def test_can_use_next_function(graph):
cursor = graph.run("RETURN 1")
record = next(cursor)
assert record == Record(zip(["1"], [1]))
def test_raises_stop_iteration(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
with raises(StopIteration):
_ = next(cursor)
def test_can_get_data(graph):
cursor = graph.run("UNWIND range(1, 3) AS n RETURN n, n * n AS n_sq")
data = cursor.data()
assert data == [{"n": 1, "n_sq": 1}, {"n": 2, "n_sq": 4}, {"n": 3, "n_sq": 9}]
def test_stream_yields_all(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
record_list = list(cursor)
assert record_list == [Record(zip(["n", "n_sq"], [1, 1])),
Record(zip(["n", "n_sq"], [2, 4])),
Record(zip(["n", "n_sq"], [3, 9])),
Record(zip(["n", "n_sq"], [4, 16])),
Record(zip(["n", "n_sq"], [5, 25])),
Record(zip(["n", "n_sq"], [6, 36])),
Record(zip(["n", "n_sq"], [7, 49])),
Record(zip(["n", "n_sq"], [8, 64])),
Record(zip(["n", "n_sq"], [9, 81])),
Record(zip(["n", "n_sq"], [10, 100]))]
def test_stream_yields_remainder(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
cursor.forward(5)
record_list = list(cursor)
assert record_list == [Record(zip(["n", "n_sq"], [6, 36])),
Record(zip(["n", "n_sq"], [7, 49])),
Record(zip(["n", "n_sq"], [8, 64])),
Record(zip(["n", "n_sq"], [9, 81])),
Record(zip(["n", "n_sq"], [10, 100]))]
def test_can_evaluate_single_value(graph):
cursor = graph.run("RETURN 1")
value = cursor.evaluate()
assert value == 1
def test_can_evaluate_value_by_index(graph):
cursor = graph.run("RETURN 1, 2")
value = cursor.evaluate(1)
assert value == 2
def test_can_evaluate_value_by_key(graph):
cursor = graph.run("RETURN 1 AS first, 2 AS second")
value = cursor.evaluate("second")
assert value == 2
def test_evaluate_with_no_records_is_none(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
value = cursor.evaluate()
assert value is None
def test_evaluate_on_non_existent_column_is_none(graph):
cursor = graph.run("RETURN 1")
value = cursor.evaluate(1)
assert value is None
def test_to_subgraph(graph):
s = graph.run("CREATE p=(:Person {name:'Alice'})-[:KNOWS]->(:Person {name:'Bob'}) RETURN p").to_subgraph()
assert isinstance(s, Subgraph)
assert len(s.nodes) == 2
assert len(s.relationships) == 1
|
[
"nigel@neo4j.com"
] |
nigel@neo4j.com
|
bf6422eb78f6c700211eaab310ce54a6a70d1a4b
|
22c56d6cb744a0b7a5879376bed0f8e12abbf357
|
/14_xi/04_ParallelogramVOn4Lines.py
|
3137178f0acbda5f06e2778f3972f981a83f2fb7
|
[
"MIT"
] |
permissive
|
mirefek/py_euclidea
|
8854bd648e4e5cbadaca9d48fffb6f31d5a3447e
|
8e400cbf36e3c8919fcc0032b7a95ce55012416e
|
refs/heads/master
| 2023-08-30T14:12:28.195003
| 2021-11-16T21:02:20
| 2021-11-16T21:02:20
| 215,083,101
| 7
| 3
| null | 2021-10-05T15:56:38
| 2019-10-14T15:45:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
from constructions import *
def init(env):
A = env.add_free(263.0, 116.0, hidden = True)
B = env.add_free(488.5, 335.0, hidden = True)
C = env.add_free(140.0, 335.0, hidden = True)
X = env.add_free(280.0, 181.5, hidden = True)
l1 = env.add_line(A,B)
l2 = env.add_line(A,C)
l3 = env.add_line(B,C)
l4 = env.add_constr(parallel_tool, (l3,X), Line)
M = env.add_free(296.5, 235.5)
env.set_tools(
"move", "point", "line", "circle",
"perp_bisector", "angle_bisector",
"perpendicular", "parallel",
"compass", "intersection",
)
env.goal_params(l1,l2,l3,l4,M)
def construct_goals(l1,l2,l3_in,l4_in,M):
result = []
for (l3,l4) in (l3_in,l4_in), (l4_in,l3_in):
A = intersection_tool(l1, reflect_by_point(l3, M))
B = intersection_tool(l2, reflect_by_point(l4, M))
C = reflect_by_point(A, M)
D = reflect_by_point(B, M)
result.append((
segment_tool(A,B),
segment_tool(B,C),
segment_tool(C,D),
segment_tool(D,A),
))
return result
|
[
"mirek@olsak.net"
] |
mirek@olsak.net
|
2fff3390b23f34ecccaa20ba3b41671bdfaebfa5
|
e3cd9de7d7e68e5995680a297fa25652487b0d02
|
/tests/sum_squares_test.py
|
b2ef648f012073ee2f9ded722f3ce60b17d76950
|
[
"Apache-2.0"
] |
permissive
|
bsaghafi/erdos
|
2293993bb336d0a9466a17cc15236390c379d8f8
|
ac27a9607f2550bbac999a0c5fb36c84c2860d2e
|
refs/heads/master
| 2020-08-21T02:11:06.982785
| 2019-06-26T23:55:44
| 2019-06-26T23:55:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,222
|
py
|
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
from erdos.data_stream import DataStream
from erdos.message import Message
import erdos.graph
from erdos.op import Op
from erdos.timestamp import Timestamp
from erdos.utils import frequency
FLAGS = flags.FLAGS
flags.DEFINE_string('framework', 'ros',
'Execution framework to use: ros | ray.')
class IntegerOp(Op):
"""Operator which publishes an integer every second"""
def __init__(self, name, number):
super(IntegerOp, self).__init__(name)
self.number = np.int64(number)
@staticmethod
def setup_streams(input_streams):
return [DataStream(name="integer_out")]
@frequency(1)
def publish_random_number(self):
output_msg = Message(self.number, Timestamp(coordinates=[0]))
self.get_output_stream("integer_out").send(output_msg)
print("%s sent %d" % (self.name, self.number))
def execute(self):
self.publish_random_number()
self.spin()
class SquareOp(Op):
"""Operator which publishes the square of its input"""
def __init__(self, name):
super(SquareOp, self).__init__(name)
@staticmethod
def setup_streams(input_streams):
input_streams.add_callback(SquareOp.on_next)
return [DataStream(name="square_output")]
def on_next(self, msg):
value = msg.data
result = value**2
self.get_output_stream("square_output").send(
Message(result, msg.timestamp))
print("%s received: %d ^ 2 = %d" % (self.name, value, result))
def execute(self):
self.spin()
class SumOp(Op):
"""Operator which sums the most recently published values for each input.
Sum operation occurs once every second.
"""
def __init__(self, name):
super(SumOp, self).__init__(name)
self.sum = 0
@staticmethod
def setup_streams(input_streams):
input_streams.add_callback(SumOp.add)
return [DataStream(name="sum_output")]
@frequency(1)
def publish_sum(self):
result = self.sum
output_msg = Message(result, Timestamp(coordinates=[0]))
self.get_output_stream("sum_output").send(output_msg)
def add(self, msg):
value = msg.data
original = self.sum
self.sum += msg.data
print("%s: %d (original) + %d (received) = %d (result)"
% (self.name, original, value, self.sum))
def execute(self):
self.publish_sum()
self.spin()
def main(argv):
"""Sums the squares of 2 numbers. """
# Set up graph
graph = erdos.graph.get_current_graph()
# Add operators
int1 = graph.add(IntegerOp, name='int1', init_args={'number': 1})
int2 = graph.add(IntegerOp, name='int2', init_args={'number': 2})
square1 = graph.add(SquareOp, name='square')
square2 = graph.add(SquareOp, name='square2')
sum = graph.add(SumOp, name='sum')
# Connect operators
graph.connect([int1], [square1])
graph.connect([int2], [square2])
graph.connect([square1, square2], [sum])
# Execute graph
graph.execute(FLAGS.framework)
if __name__ == "__main__":
app.run(main)
|
[
"gogionel@gmail.com"
] |
gogionel@gmail.com
|
69093d96a03fc2ddc7f4fd1fb870114f283018ca
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03574/s625136603.py
|
8c1ddbc3ce968e40601728c96995e7838eb37d66
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
h,w=map(int, input().split())
w1=['.'*(w+2)]
s=w1+['.'+input()+'.' for _ in range(h)]+w1
for i in range(1,h+1):
for j in range(1,w+1):
if s[i][j]=='.':
t=s[i-1][j-1:j+2]+s[i][j-1:j+2]+s[i+1][j-1:j+2]
s[i]=s[i][:j]+str(t.count('#'))+s[i][j+1:]
print(s[i][1:-1])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c2df6d6c9916fde341abb0d235790b8577ee05b0
|
2c838d3ffee6e357014dd0cd543ef841503d6647
|
/src/Watcher/transforms/client2manufact.py
|
96618403f6e2beb9ec4a0730b0ab2fe62594ced8
|
[] |
no_license
|
catalyst256/Watcher
|
079bb0ffead77c46a814e01e851cf1b6a33b2678
|
14123f501643475fc97b64093284c1b509897550
|
refs/heads/master
| 2021-01-25T10:29:18.110796
| 2015-01-16T07:43:44
| 2015-01-16T07:43:44
| 14,232,782
| 21
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
#!/usr/bin/env python
import sqlite3 as lite
from common.entities import WirelessClient, Vendor
from canari.maltego.message import UIMessage
from canari.framework import configure #, superuser
__author__ = 'catalyst256'
__copyright__ = 'Copyright 2013, Watcher Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'catalyst256'
__email__ = 'catalyst256@gmail.com'
__status__ = 'Development'
__all__ = [
'dotransform'
]
#@superuser
@configure(
label='Watcher - MAC Address Lookup',
description='Tries to work out the vendor from the MAC address',
uuids=[ 'Watcher.v2.client_2_manufacturer' ],
inputs=[ ( 'Watcher', WirelessClient ) ],
debug=True
)
def dotransform(request, response):
mac_addr = request.value[:-9].upper()
mac_addr = mac_addr.replace(':', '')
mac_db = 'Watcher/resources/databases/macaddr.db'
mac_vendor = []
con = lite.connect(mac_db)
with con:
cur = con.cursor()
cur.execute('SELECT * FROM macaddr WHERE mac like ' + "\"" + mac_addr + "\"")
while True:
row = cur.fetchone()
if row == None:
break
if row[1] not in mac_vendor:
mac_vendor.append(row[1])
for x in mac_vendor:
e = Vendor(x)
response += e
return response
|
[
"catalyst256@gmail.com"
] |
catalyst256@gmail.com
|
e29899992d7b9d372aed601eae6f1f6896db9247
|
a83dc7ccce7962addbb7a7d3f45eea1dac000a21
|
/10day/2.py
|
8d49febd380f81aa32131265dce0dbbe43835e22
|
[] |
no_license
|
liruixiong/1808
|
879bb90587db0a7073e1a9b5b6c98e7d754feaf9
|
45f67f0ea8b25a7a68efd07272f6f361eae625c3
|
refs/heads/master
| 2020-03-25T19:34:37.676624
| 2018-08-22T01:49:04
| 2018-08-22T01:49:04
| 144,089,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
at =float(input(" "))
y = float(input(" "))
mo = input("+-*/")
if mo == "+":
print(at+y)
elif mo == "-":
print(at-y)
elif mo == "*":
print(at*y)
elif mo == "/":
print(at/y)
|
[
"1356703763@qq.com"
] |
1356703763@qq.com
|
5f95567bceaf7b570e56328ed86f10ff0b772f05
|
940d7b93fb27e8eead9b6e52bc5c7444666744dd
|
/python/src/Demo/cgi/cgi2.py
|
d956f6538c63219fc0c7486a6b8aec4cd0f38de9
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"Apache-2.0"
] |
permissive
|
pilotx45/sl4a
|
d446531d310cc17d93f24aab7271a0813e8f628d
|
150e3e46b5103a9b9a391034ef3fbc5bd5160d0f
|
refs/heads/master
| 2022-03-24T19:48:30.340479
| 2022-03-08T16:23:58
| 2022-03-08T16:23:58
| 277,016,574
| 1
| 0
|
Apache-2.0
| 2022-03-08T16:23:59
| 2020-07-04T01:25:36
| null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
#!/usr/local/bin/python
"""CGI test 2 - basic use of cgi module."""
import cgitb; cgitb.enable()
import cgi
def main():
form = cgi.FieldStorage()
print "Content-type: text/html"
print
if not form:
print "<h1>No Form Keys</h1>"
else:
print "<h1>Form Keys</h1>"
for key in form.keys():
value = form[key].value
print "<p>", cgi.escape(key), ":", cgi.escape(value)
if __name__ == "__main__":
main()
|
[
"damonkohler@gmail.com"
] |
damonkohler@gmail.com
|
b6bde677aac4f26f15c0fe037c8ece62d778b970
|
f4de413ad77ffaa9b2e7d65e1579a8d2696c0c42
|
/classifier/rnn.py
|
93be5b6baf981193a36d1dee3fc2ddf89ffa91f5
|
[] |
no_license
|
BinbinBian/Parable
|
b4d93d4fef2bb02f19cb3571501c8a8162045ff1
|
f2ceb0b9a5749db7578c95edcbd2a26adb7249cf
|
refs/heads/master
| 2021-01-17T18:44:06.129814
| 2016-05-07T06:13:35
| 2016-05-07T06:13:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from rnn_layers import *
import theano
import numpy as np
class RNNEncoderDecoder(object):
"""
A RNN Encoder-Decoder Framework
"""
class StochasticRNN(object):
"""
RNN that can encode arbitrarily long sequence
(thousands of time steps)
(best for QA, Paragraph chunking tasks)
"""
|
[
"leo.niecn@gmail.com"
] |
leo.niecn@gmail.com
|
53e60a6387d3a899ed311a33fdaded25fdf5e460
|
c725fc58d217f6730687a565fbf85fcf174e8009
|
/code_SDSS/sql_bright_star.py
|
549fb863ca8b726b5c8de71ac5c9955cb27620e0
|
[] |
no_license
|
Kein-Cary/Intracluster-Light
|
6faca2bd0413244765474beeffd53cfaa401eef2
|
ffcb2d6ea10be45422c7e73408fc6ff6cadf3a85
|
refs/heads/master
| 2023-03-18T04:51:06.539453
| 2023-03-12T02:48:01
| 2023-03-12T02:48:01
| 160,816,520
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,347
|
py
|
# Query the SDSS DR7 SQL service for stars / saturated sources inside a box
# around each cluster target in the sample catalogue, saving one CSV-style
# text file per target and recording targets with no matches.
import h5py
import numpy as np
import astropy.io.fits as fits
import mechanize
import pandas as pd
from io import StringIO
import astropy.units as U
import astropy.constants as C
from astropy import cosmology as apcy
#url = 'http://skyserver.sdss.org/dr12/en/tools/search/sql.aspx'
url = 'http://cas.sdss.org/dr7/en/tools/search/sql.asp'
load = '/media/xkchen/My Passport/data/SDSS/'
# Catalogue rows: [0]=redshift, [1]=RA, [2]=Dec (degrees).
with h5py.File(load + 'mpi_h5/sample_catalog.h5', 'r') as f:
    catalogue = np.array(f['a'])
z = catalogue[0]
ra = catalogue[1]
dec = catalogue[2]
#r_select = 0.16676 # centered at BCG, radius = 10 arcmin (1515.15 pixel)
r_select = 0.42 ## 1.5 * diagonal line length
N_tot = len(z)
# sub_N counts targets that DID return sources; decremented on failures.
sub_N = N_tot * 1
no_match = []
for kk in range( N_tot ):
    ra_g = ra[kk]
    dec_g = dec[kk]
    z_g = z[kk]
    # Search-box corners in degrees.  NOTE(review): no cos(dec) correction is
    # applied to the RA half-width -- confirm that a flat box is intended.
    c_ra0 = str(ra_g - r_select)
    c_dec0 = str(dec_g - r_select)
    c_ra1 = str(ra_g + r_select)
    c_dec1 = str(dec_g + r_select)
    # query stars and saturated sources (may not be stars)
    data_set = """
    SELECT ALL
        p.ra, p.dec, p.u, p.g, p.r, p.i, p.z, p.type,
        p.isoA_u, p.isoA_g, p.isoA_r, p.isoA_i, p.isoA_z,
        p.isoB_u, p.isoB_g, p.isoB_r, p.isoB_i, p.isoB_z,
        p.isoPhi_u, p.isoPhi_g, p.isoPhi_r, p.isoPhi_i, p.isoPhi_z,
        p.flags, dbo.fPhotoFlagsN(p.flags)
    FROM PhotoObj AS p
    WHERE
        p.ra BETWEEN %s AND %s AND p.dec BETWEEN %s AND %s
        AND (p.type = 6 OR (p.flags & dbo.fPhotoFlags('SATURATED')) > 0)
    ORDER by p.r
    """ % (c_ra0, c_ra1, c_dec0, c_dec1)
    # Submit the query through the legacy HTML form and save the CSV reply.
    br = mechanize.Browser()
    resp = br.open(url)
    resp.info()
    br.select_form(name = "sql")
    br['cmd'] = data_set
    br['format'] = ['csv']
    response = br.submit()
    s = str(response.get_data(), encoding = 'utf-8')
    doc = open('/home/xkchen/mywork/ICL/data/star_dr7/source_SQL_Z%.3f_ra%.3f_dec%.3f.txt' % (z_g, ra_g, dec_g), 'w')
    print(s, file = doc)
    doc.close()
    # Re-read the file to check it actually contains a source table.
    # NOTE(review): the bare `except:` also swallows unrelated errors
    # (I/O, keyboard interrupt); consider `except Exception:` at least.
    try:
        cat = pd.read_csv('/home/xkchen/mywork/ICL/data/star_dr7/source_SQL_Z%.3f_ra%.3f_dec%.3f.txt' % (z_g, ra_g, dec_g),)
        try_ra = np.array(cat.ra)
    except:
        no_match.append('%d, %.3f,%.3f,%.3f' % (kk, ra_g, dec_g, z_g) )
        sub_N -= 1
# Summary of targets with no returned sources.
doc = open('No_source_match_sample.txt', 'w')
for ll in range(len(no_match)):
    subx = no_match[ll]
    print(subx, file = doc)
doc.close()
print(sub_N)
|
[
"cxkast@gmail.com"
] |
cxkast@gmail.com
|
b023906757f0266c579b3042d843bdd4da38d017
|
8126291334a4288f51b1116ea31e953debf07039
|
/SRC/engine/IO/propertyoutput.spy
|
11311550633bb57671c61075db7d567d2fda3223
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
jumpingyu/OOF2
|
846a7dd506f029535153834607b698ce32dc155d
|
31a25398b046c1963859dd96785329d2a9af8681
|
refs/heads/master
| 2020-05-21T09:12:07.013560
| 2019-04-02T21:05:49
| 2019-04-02T21:05:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,216
|
spy
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.engine import outputval
from ooflib.SWIG.engine import symmmatrix
from ooflib.common import debug
from ooflib.common import utils
from ooflib.engine.IO import output
from ooflib.engine.IO import outputClones
import types, sys
# The PropertyOutputRegistration subclasses create an Output object
# for each registered PropertyOutput. This bridges the gap between
# the C++ PropertyOutputs and the more general Python Outputs.
class PORegBase(PropertyOutputRegistration):
    ## Callback for all PropertyOutputs. Outputs that need to return
    ## something other than a list of OutputVal instances should
    ## override the convert method.
    def opfunc(self, mesh, elements, coords, **params):
        """Evaluate the PropertyOutput on each (element, coords) pair of the
        mesh and return the accumulated values, post-processed by convert()."""
        po = self.instantiate(params)
        mesh.precompute_all_subproblems()
        initializer = self.initializer()
        results = []
        for element, ecoords, in zip(elements, coords):
            # NOTE(review): `mat` is never used below -- possibly kept for a
            # side effect of material(); confirm before removing.
            mat = element.material()
            mesh.begin_all_subproblems(element)
            results.extend(po.evaluate(mesh, element, initializer, ecoords))
            mesh.end_all_subproblems(element)
        return self.convert(results)
    def convert(self, results):
        # Identity by default; subclasses may map OutputVals to plain values.
        return results
##### Scalar outputs
class ScalarPropertyOutputRegistration(PORegBase):
    # Registers a scalar-valued PropertyOutput under `name` as both a Scalar
    # and an Aggregate output.
    # NOTE(review): the mutable default `parameters=[]` is shared across
    # calls; safe only as long as it is never mutated.
    def __init__(self, name, initializer=None, parameters=[], ordering=0,
                 srepr=None, tip=None, discussion=None):
        PropertyOutputRegistration.__init__(
            self, name, initializer or ScalarPropertyOutputInit())
        op = output.Output(name=name,
                           callback=self.opfunc,
                           otype=outputval.ScalarOutputValPtr,
                           instancefn=outputClones.scalar_instancefn,
                           column_names=outputClones.single_column_name,
                           params=parameters,
                           srepr=srepr, tip=tip, discussion=discussion)
        output.defineScalarOutput(name, op, ordering=ordering)
        output.defineAggregateOutput(name, op, ordering=ordering)
    # def convert(self, results): # convert from ScalarOutputVal to Float
    # return [r.value() for r in results]
##### SymmMatrix3 outputs.
def _symmmatrix3_instancefn(self):
    # Prototype output value: a fresh zero-valued 3x3 symmetric matrix.
    return symmmatrix.SymmMatrix3(0.,0.,0.,0.,0.,0.)
def _symmmatrix3_column_names(self):
    """Return column labels of the form "<shortrepr>[<component>]", one per
    component of the symmetric-matrix output value."""
    sr = self.shortrepr()
    names = []
    # C++-style iterator protocol exposed by the SWIG-wrapped OutputVal.
    it = self.outputInstance().getIterator()
    while not it.end():
        names.append("%s[%s]" % (sr, it.shortstring()))
        it.next()
    return names
class SymmMatrix3PropertyOutputRegistration(PORegBase):
    # Registers three related outputs for a symmetric-matrix property:
    # "<name>:Value" (aggregate), "<name>:Component" (scalar) and
    # "<name>:Invariant" (scalar + aggregate).
    # NOTE(review): mutable default `parameters=[]` is shared across calls.
    def __init__(self, name, initializer=None, parameters=[], ordering=0,
                 srepr=None, tip=None, discussion=None):
        PropertyOutputRegistration.__init__(
            self, name, initializer or SymmMatrix3PropertyOutputInit())
        op = output.Output(name=name,
                           callback=self.opfunc,
                           otype=outputval.OutputValPtr,
                           instancefn=_symmmatrix3_instancefn,
                           srepr=srepr,
                           column_names=_symmmatrix3_column_names,
                           params=parameters,
                           tip=tip, discussion=discussion)
        output.defineAggregateOutput(name+":Value", op, ordering=ordering)
        def comprepr(s):
            # Short representation "<output>[<component>]" for the component
            # output clone below.
            comp = s.resolveAlias("component").value
            # We have to pass s to op.shortrepr so that the shortrepr
            # will be computed for the actual Output, not the Output
            # defined above.  The actual output will be a clone of the
            # one defined there.
            return "%s[%s]" % (op.shortrepr(s), comp)
        compout = outputClones.ComponentOutput.clone(
            name=name+" Component",
            tip='Compute components of %s' % name,
            srepr=comprepr,
            discussion=
            """
            <para>Compute the specified component of %s on a &mesh;.</para>
            """
            % name)
        compout.connect('field', op)
        for param in parameters:
            compout.aliasParam('field:' + param.name, param.name)
        output.defineScalarOutput(name+":Component", compout, ordering=ordering)
        def invariantrepr(s):
            # Short representation "<invariant>(<output>)".
            invariant = s.resolveAlias("invariant").value.shortrepr()
            # See comment above about op.shortrepr(s)
            return "%s(%s)" % (invariant, op.shortrepr(s))
        invout = outputClones.InvariantOutput.clone(
            name=name+" Invariant",
            srepr=invariantrepr,
            tip='Compute invariants of %s' % name,
            discussion="""
            <para>Compute the specified invariant of %s on a &mesh;.</para>
            """
            % name)
        invout.connect('field', op)
        for param in parameters:
            invout.aliasParam('field:' + param.name, param.name)
        output.defineScalarOutput(name+":Invariant", invout, ordering=ordering)
        output.defineAggregateOutput(name+":Invariant", invout,
                                     ordering=ordering)
# ThreeVector outputs
## TODO 3D: These should add themselves as "Value" outputs, and there
## should be an "Invariant" output, also, since 3-vectors have a
## magnitude. srepr's and column_name's need to be adjusted/provided.
## None of this is implemented yet because there are no
## ThreeVectorPropertyOutputs to test it on.
class ThreeVectorPropertyOutputRegistration(PORegBase):
    # Registers a 3-vector-valued PropertyOutput (aggregate output plus a
    # per-component scalar output).  See the TODO comment above: srepr and
    # column_names handling here is incomplete/untested.
    # NOTE(review): mutable default `parameters=[]` is shared across calls.
    def __init__(self, name, initializer=None, parameters=[], ordering=0,
                 srepr=None, tip=None, discussion=None):
        PropertyOutputRegistration.__init__(
            self, name, initializer or ThreeVectorPropertyOutputInit())
        op = output.Output(name=name,
                           callback=self.opfunc,
                           otype=outputval.OutputValPtr,
                           instancefn=outputClones.vector_instancefn,
                           params=parameters,
                           srepr=srepr, tip=tip,
                           discussion=discussion)
        output.defineAggregateOutput(name, op, ordering=ordering)
        compout = outputClones.ComponentOutput.clone(
            name=name+" Component",
            tip='Compute components of %s' % name,
            discussion=
            """
            <para>Compute the specified component of <link
            linkend='Output-%s'>%s</link> on a &mesh;.</para>
            """
            % (name, name))
        compout.connect('field', op)
        for param in parameters:
            compout.aliasParam('field:'+param.name, param.name)
        output.defineScalarOutput(name+":Component", compout, ordering=ordering)
|
[
"lnz5@rosie.nist.gov"
] |
lnz5@rosie.nist.gov
|
c8ce9fe2ffe6f9aad8ba442ef8c5905d1888f424
|
c97d3c8848e4f03edb6c64b6abff530a6e74d616
|
/apps/models_sklearn_spark/Matrix_factorization/handler.py
|
1b6060f59557d47ea3890cf8f7f98d14845086ee
|
[
"Apache-2.0"
] |
permissive
|
simhaonline/Django_web
|
eeb80d8f32a460258fceb30ecececd7410949f72
|
f7df1a7b101d41835a334b78cddf3570968799e4
|
refs/heads/master
| 2023-04-24T23:33:51.535515
| 2021-04-02T15:20:29
| 2021-04-02T15:20:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,937
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# __author__ : stray_camel
# __description__ : 矩阵分解
# __date__: 2020/09/09 09
try:
from apps.data.handler import get_ml_1m_ratings_df
except:
pass
from math import sqrt
from sklearn.metrics import mean_squared_error
from scipy.sparse.linalg import svds
from sklearn.model_selection import cross_validate, train_test_split
import sys
import os
import numpy as np
import pandas as pd
from functools import lru_cache
# sys.path.append(os.path.dirname(os.path.abspath('./')))
# from apps.models_sklearn_spark.Matrix_factorization.handler import ratings_df
# apps_floder = os.path.dirname(os.path.dirname(__file__))
# ratings_file = os.path.join(apps_floder, 'data\\ml-1m\\ratings.csv')
# ratings_df = pd.read_csv(ratings_file, sep=',', engine='python')
def data_split(
        ratings_df: '数据',
        ratio: '分割数据的比例' = 1/4) -> (pd.DataFrame, pd.DataFrame):
    """Partition *ratings_df* into (train, test) by row position.

    The first ``round(len * ratio)`` rows form the training set; the last
    ``round(len * (1 - ratio))`` rows form the test set.
    """
    n_rows = len(ratings_df)
    train_part = ratings_df.head(round(n_rows * ratio))
    test_part = ratings_df.tail(round(n_rows * (1 - ratio)))
    return train_part, test_part
def get_data_sparsity(ratings_df, n_users, n_movies) -> float:
    """Print and return the fill ratio of the ratings table relative to a
    dense ``n_users x n_movies`` matrix, rounded to 3 decimal places."""
    density = round(ratings_df.size / float(n_users * n_movies), 3)
    print('The sparsity level of MovieLens is ' + str(density))
    return density
def create_uesr_item(ratings_df, n_users, n_movies) -> (np.ndarray, np.ndarray):
    """Build dense user-item rating matrices for the train and test splits.

    Rows are users and columns are movies; IDs in the data are 1-based,
    hence the -1 offsets when filling the arrays.
    """
    train_part, test_part = data_split(ratings_df)

    def to_matrix(frame):
        # frame.itertuples() yields (index, UserID, MovieID, Rating, ...).
        matrix = np.zeros((n_users, n_movies))
        for row in frame.itertuples():
            matrix[row[1] - 1, row[2] - 1] = row[3]
        return matrix

    return to_matrix(train_part), to_matrix(test_part)
def rmse(prediction, ground_truth) -> float:
    """Root-mean-squared error over the entries where *ground_truth* is nonzero.

    Both arguments are numpy arrays of the same shape.  Entries where the
    ground truth is zero (i.e. unrated) are excluded from the error.
    """
    mask = ground_truth.nonzero()
    pred_vals = prediction[mask].flatten()
    true_vals = ground_truth[mask].flatten()
    # Equivalent to sqrt(sklearn.metrics.mean_squared_error(...)) but without
    # pulling in the sklearn dependency for a one-line computation.
    return float(np.sqrt(np.mean((pred_vals - true_vals) ** 2)))
@lru_cache(None)
def mf_svds(k) -> (float, np.ndarray):
    """Truncated-SVD matrix factorisation of the MovieLens rating matrix.

    Builds the train/test user-item matrices, factorises the training matrix
    with a rank-*k* sparse SVD, reconstructs the dense prediction matrix and
    returns ``(rmse on test entries, prediction matrix)``.

    Results are memoised per *k* (unbounded cache -- fine for the small set
    of k values explored here).
    """
    ratings_df = get_ml_1m_ratings_df()
    n_users = max(ratings_df.UserID.unique())
    n_movies = max(ratings_df.MovieID.unique())
    print('Number of users = ' + str(n_users) +
          ' | Number of movies = ' + str(n_movies))
    train_data_matrix, test_data_matrix = create_uesr_item(
        ratings_df, n_users, n_movies)
    # BUG FIX: the original hard-coded k=20 here, silently ignoring the *k*
    # argument (which also made the per-k memoisation meaningless).
    u, s, vt = svds(train_data_matrix, k=k)
    s_diag_matrix = np.diag(s)
    X_pred = np.dot(np.dot(u, s_diag_matrix), vt)
    _rmse = rmse(X_pred, test_data_matrix)
    print('User-based CF MSE: ' + str(_rmse))
    return _rmse, X_pred
|
[
"aboyinsky@outlook.com"
] |
aboyinsky@outlook.com
|
ee4a8bd968583926c1ed2877ab805846d1966635
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/16144140.py
|
a395cacae7ac1620e027f02c873102b4b6342cf3
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/16144140.py generated: Wed, 25 Jan 2017 15:25:29
#
# Event Type: 16144140
#
# ASCII decay Descriptor: [Xi_b0 -> (Lambda0 -> p+ pi-) (J/psi(1S) -> mu+ mu-)]cc
#
# Configure generation of event type 16144140:
# [Xi_b0 -> (Lambda0 -> p+ pi-) (J/psi(1S) -> mu+ mu-)]cc
# (auto-generated DecFiles options; edit the .dec source, not this file).
from Configurables import Generation
Generation().EventType = 16144140
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# Decay model and generator-level cut for the signal Xi_b0 / anti-Xi_b0.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Xib0_JpsiLambda,mm=phsp,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 5232,-5232 ]
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
b768b58cf3721bb2f6b3a2fc866798aa78ca6847
|
a990bd26d3a69d1ea6699c85efa2cea99452c3df
|
/problems/leetcode/rottingOranges994.py
|
5388c929d4cee4a0f12199681fa2844bb927234b
|
[] |
no_license
|
abecus/DS-and-Algorithms
|
5f1a948a085465ae165090ec957a9d5307ce729d
|
3259e8183382265a27cf8c91e37d0086175a5703
|
refs/heads/master
| 2022-05-05T07:07:08.194243
| 2022-04-05T16:23:39
| 2022-04-05T16:23:39
| 193,111,610
| 11
| 6
| null | 2020-11-18T16:19:18
| 2019-06-21T14:27:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
"""
_________________________994. Rotting Oranges_________________________
Difficulty: Medium Likes: 1259 Dislikes: 170 Solution: Available
Total Accepted: 77.3K Total Submission: 164.3K Acceptance Rate: 47.0%
Tags: Breadth-first Search
In a given grid, each cell can have one of three values: the value 0
representing an empty cell; the value 1 representing a fresh orange;
the value 2 representing a rotten orange. Every minute, any fresh
orange that is adjacent (4-directionally) to a rotten orange becomes
rotten. Return the minimum number of minutes that must elapse until no
cell has a fresh orange. If this is impossible, return -1 instead.
Example 1:
Input: [[2,1,1],[1,1,0],[0,1,1]]
Output: 4
Example 2:
Input: [[2,1,1],[0,1,1],[1,0,1]]
Output: -1
Example 3:
Input: [[0,2]]
Output: 0
Note:
1 <= grid.length <= 101 <= grid[0].length <= 10grid[i][j] is only 0, 1, or 2.
"""
def orangesRotting(grid):
    """Return the minutes until every fresh orange rots, or -1 if impossible.

    Multi-source BFS from all initially rotten cells (value 2); fresh cells
    (value 1) adjacent to the frontier rot each minute.  Mutates *grid* in
    place (as the original did).

    Improvements over the original: a fresh-orange counter replaces the
    final O(rows*cols) rescan, and the per-round ``temp.copy()`` is gone.
    """
    rows = len(grid)
    cols = len(grid[0])

    def neighbors(i, j):
        # 4-directional in-bounds neighbours of (i, j).
        for di, dj in ((1, 0), (-1, 0), (0, -1), (0, 1)):
            ni, nj = i + di, j + dj
            if 0 <= ni < rows and 0 <= nj < cols:
                yield ni, nj

    frontier = []
    fresh = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] == 2:
                frontier.append((i, j))
            elif grid[i][j] == 1:
                fresh += 1

    minutes = 0
    while frontier and fresh:
        next_frontier = []
        for i, j in frontier:
            for ni, nj in neighbors(i, j):
                if grid[ni][nj] == 1:
                    grid[ni][nj] = 2
                    fresh -= 1
                    next_frontier.append((ni, nj))
        frontier = next_frontier
        minutes += 1

    return -1 if fresh else minutes
if __name__ == "__main__":
    # Example 1 fixture -- expected output: 4.
    grid = [[2,1,1],[1,1,0],[0,1,1]]
    # Alternative fixtures (both yield -1: an orange is unreachable):
    # grid = [[2,1,1],
    # [0,1,1],
    # [1,0,1]]
    # grid = [[0,1]]
    print(orangesRotting(grid,))
"""
similarQuestions::
Walls and Gates: Medium
"""
|
[
"insaaone@gmail.com"
] |
insaaone@gmail.com
|
b3d2499cc45fea03a267a459dd73d738e8962baa
|
601362aea0d323309bea046d93ef3f2abe090718
|
/flog/libs/wikipedia.py
|
b8f67543bdc12c2b8f795d5ecf414fb4fbf6e2b9
|
[] |
no_license
|
ErikBurdett/flog
|
cca1d780835351b7017b993e4047d43a437c6504
|
633bd3ff95b62766fcf40d76513d27b8785870a0
|
refs/heads/master
| 2022-10-30T17:01:31.538700
| 2020-06-16T04:40:14
| 2020-06-16T04:40:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
import requests
def random_article():
    """Fetch the title record of a random English Wikipedia page.

    Returns the decoded JSON payload from the REST v1 ``page/random/title``
    endpoint; raises ``requests`` exceptions on network failure or timeout.
    """
    # Plain string literal: the original used an f-string with no placeholders.
    url = 'https://en.wikipedia.org/api/rest_v1/page/random/title'
    return requests.get(url, timeout=2.0).json()
|
[
"randy@thesyrings.us"
] |
randy@thesyrings.us
|
2bab2de433e731e2c1376160a0148c2a824ea777
|
46083e01408b6bb2e05f78a38f69fd5a6881a4c3
|
/autotesting/test2/testcase/testbase.py
|
1611bc6797907d0c433ece866a2067286b297c17
|
[] |
no_license
|
caixinshu/api
|
7601ce97ed6666cbc5995ecd1e32165605a7da7e
|
b75bf1bdbf4ee14f0485d552ff2f382c7991821e
|
refs/heads/master
| 2021-01-22T05:54:14.651098
| 2019-12-17T09:09:23
| 2019-12-17T09:19:26
| 81,718,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
# -*- coding: utf-8 -*
import requests
from suds.client import Client
from config import url
from tools import readexceldata
#初始化url
class test:
    """Bundle a WSDL endpoint URL with one sheet of an excel data workbook."""

    def __init__(self, url, file, colnameindex, by_name):
        # Endpoint and spreadsheet coordinates used by the accessors below.
        self.url = url
        self.file = file
        self.colnameindex = colnameindex
        self.by_name = by_name

    def getclient(self):
        """Build a SOAP client for the configured WSDL url."""
        return Client(self.url)

    def getdata(self):
        """Load the rows of the configured excel sheet."""
        return readexceldata.excel_table_byname(
            self.file, self.colnameindex, self.by_name)
def main():
    """Smoke test: load the 'login' sheet from the sample workbook and dump it."""
    test1 = test(url.loginurl, "E:\\workspacepython\\apiteststudy\\data\\study.xls", 0, "login")
    # print() calls replace the Python-2-only print statements; with a single
    # argument the output is identical on Python 2 and 3.
    print("111")
    print(test1.getdata())
if __name__=="__main__":
    main()
|
[
"651696408@qq.com"
] |
651696408@qq.com
|
3a04a77da2ee5df5107a7f1f4186b15aaa3400bd
|
ca08100b33a78c01bf49f097f4e80ed10e4ee9ad
|
/intrepidboats/apps/boats/migrations/0025_auto_20170518_1334.py
|
4d0337d5918da1292d1f741df70a316bbba6feec
|
[] |
no_license
|
elite0401/intrepidpowerboats
|
347eae14b584d1be9a61ca14c014135ab0d14ad0
|
d2a475b60d17aa078bf0feb5e0298c927e7362e7
|
refs/heads/master
| 2021-09-11T01:51:47.615117
| 2018-04-06T02:20:02
| 2018-04-06T02:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-18 17:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.4: narrows AboutTheBoatImage.kind to the
    # exterior/interior/cabin choice set.  Do not edit by hand.
    dependencies = [
        ('boats', '0024_video_video_external_url'),
    ]
    operations = [
        migrations.AlterField(
            model_name='abouttheboatimage',
            name='kind',
            field=models.CharField(choices=[('EXTERIOR', 'exterior'), ('INTERIOR', 'interior'), ('CABIN', 'cabin')], max_length=25, verbose_name='kind'),
        ),
    ]
|
[
"elite.wisdom@gmx.com"
] |
elite.wisdom@gmx.com
|
5493043be3c35aaaa1701498e246f4f8555ae5d7
|
8b2aeac35b73d03587251311fcd171e72a8fc854
|
/photos/migrations/0002_auto_20180128_1207.py
|
4b21b603c9fe41632188a38fc2948d97f3dcf7af
|
[] |
no_license
|
mansonul/wedding
|
78e273cf68b5897136c0b8ef18c664c3cfa505e2
|
3168faa79f1c223eb078e0e1941a2ddfeab903c4
|
refs/heads/master
| 2021-05-10T18:16:13.795886
| 2018-01-29T18:13:41
| 2018-01-29T18:13:41
| 118,626,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-28 12:07
from __future__ import unicode_literals
from django.db import migrations
import imagekit.models.fields
import photos.models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.9: switches PhotoUpload.image to a
    # ProcessedImageField whose upload path is computed by path_and_rename.
    dependencies = [
        ('photos', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='photoupload',
            name='image',
            field=imagekit.models.fields.ProcessedImageField(blank=True, null=True, upload_to=photos.models.PhotoUpload.path_and_rename),
        ),
    ]
|
[
"contact@dragosnicu.com"
] |
contact@dragosnicu.com
|
7b485e6c81c4efd3aac47646b1b61652249aa27d
|
f9b5a01d8cfeddc0c52fcbfc593fa0f31c4df1bf
|
/ex06/ex06.py
|
6367fad5aff079d421fae75ad052baafeb043335
|
[] |
no_license
|
wogurdlek2/16PFA-2013211032
|
358154af14f65b7fd635dd9a682dd9ea22d7539e
|
478616b3a090c596afba1b62f01152d468e0f014
|
refs/heads/master
| 2021-01-21T12:59:29.942224
| 2016-05-25T11:31:16
| 2016-05-25T11:31:16
| 53,999,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# Exercise 6 (string formatting).  Modernised from Python-2 print statements
# to the print() function; every output string is unchanged byte-for-byte
# (including the original's typos "prople" and "I sais").
x = "There are %d types of prople." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)
print(x)
print(y)
print("I sais: %r." % x)
print("I also said: '%s'." % y)
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print(joke_evaluation % hilarious)
w = "This is the left side of..."
e = "a string with a right side."
print(w + e)
|
[
"CAD Client"
] |
CAD Client
|
bb42ad482fbb2350569ef7809947d727ac99b2f2
|
9ecfba7ed75b2869b09ec3e79c1f45dab21b9640
|
/others/cropimage.py
|
2583ee07c70b7822aad8ceca2237d18e83ee22a9
|
[
"MIT"
] |
permissive
|
pection/Scraper-website
|
ca7af593e421d4f09bfc280d6ec24e6562e0f6c3
|
77ed1df5103e1d8222a055c19acf5af255ffa4aa
|
refs/heads/master
| 2022-12-25T15:51:46.958483
| 2020-10-07T13:58:40
| 2020-10-07T13:58:40
| 315,717,273
| 1
| 0
|
MIT
| 2020-11-24T18:18:42
| 2020-11-24T18:18:41
| null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
import cv2
import os
import sys
import numpy as np
from PIL import Image
# Collect every .png under `path` (recursively), then crop each image in
# place to the square region rows 40:450 x cols 40:450.
num = 1
path ="//Users/pection/Documents/Crop/"

# Absolute paths of all PNG files found under `path`.
filelist = [
    os.path.join(root, name)
    for root, _dirs, files in os.walk(path)
    for name in files
    if name.endswith(".png")
]
print(filelist)

for filename in filelist:
    # -1 = IMREAD_UNCHANGED: keep the alpha channel if present.
    image = cv2.imread(filename, -1)
    cropped = image[40:450, 40:450]
    cv2.imwrite(filename, cropped)
|
[
"pection.naphat@gmail.com"
] |
pection.naphat@gmail.com
|
4171f3f9288b6953d7b6ea9c6d40cec41f3b8406
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/contrib/slim/python/slim/nets/inception_v3.pyi
|
df9dccde040bd84ccfd994e2ec65a1450b9e965f
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675
| 2019-05-15T06:21:43
| 2019-05-15T06:21:43
| 186,748,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
pyi
|
# Stubs for tensorflow.contrib.slim.python.slim.nets.inception_v3 (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib import layers as layers
from tensorflow.contrib.framework.python.ops import arg_scope as arg_scope
from tensorflow.contrib.layers.python.layers import initializers as initializers, regularizers as regularizers
from tensorflow.python.framework import ops as ops
from tensorflow.python.ops import array_ops as array_ops, init_ops as init_ops, nn_ops as nn_ops, variable_scope as variable_scope
from typing import Any as Any, Optional as Optional
# Truncated-normal initializer factory; stubgen could not infer a precise type.
trunc_normal: Any
# Feature-extractor graph up to `final_endpoint` (per tf.contrib.slim API).
def inception_v3_base(inputs: Any, final_endpoint: str = ..., min_depth: int = ..., depth_multiplier: float = ..., scope: Optional[Any] = ...): ...
# Full Inception-v3 classifier built on inception_v3_base.
def inception_v3(inputs: Any, num_classes: int = ..., is_training: bool = ..., dropout_keep_prob: float = ..., min_depth: int = ..., depth_multiplier: float = ..., prediction_fn: Any = ..., spatial_squeeze: bool = ..., reuse: Optional[Any] = ..., scope: str = ...): ...
# arg_scope with the weight-decay / batch-norm defaults used by the network.
def inception_v3_arg_scope(weight_decay: float = ..., batch_norm_var_collection: str = ..., batch_norm_decay: float = ..., batch_norm_epsilon: float = ..., updates_collections: Any = ..., use_fused_batchnorm: bool = ...): ...
|
[
"matangover@gmail.com"
] |
matangover@gmail.com
|
f0d8f6c720eb71434eb0ba1ce0acdcdedf4ed128
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/2e033ce6e3a2cdde5174895cadb3b406b2a013729dd641fee2cebd9f7ed97879/cv2/videoio_registry.py
|
0be5f4768f19952c2adff113bfb96d3a9ccf5394
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
# encoding: utf-8
# module cv2.videoio_registry
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
# Auto-generated stub: all bodies are placeholders (`pass`); the real
# implementations live in the compiled cv2 extension module.
__loader__ = None
__spec__ = None
# functions
def getBackendName(api): # real signature unknown; restored from __doc__
    """
    getBackendName(api) -> retval
    . @brief Returns backend API name or "UnknownVideoAPI(xxx)"
    . @param api backend ID (#VideoCaptureAPIs)
    """
    pass
def getBackends(): # real signature unknown; restored from __doc__
    """
    getBackends() -> retval
    . @brief Returns list of all available backends
    """
    pass
def getCameraBackends(): # real signature unknown; restored from __doc__
    """
    getCameraBackends() -> retval
    . @brief Returns list of available backends which works via `cv::VideoCapture(int index)`
    """
    pass
def getStreamBackends(): # real signature unknown; restored from __doc__
    """
    getStreamBackends() -> retval
    . @brief Returns list of available backends which works via `cv::VideoCapture(filename)`
    """
    pass
def getWriterBackends(): # real signature unknown; restored from __doc__
    """
    getWriterBackends() -> retval
    . @brief Returns list of available backends which works via `cv::VideoWriter()`
    """
    pass
# no classes
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
ace559b46e79210154608496701d451bae6e9f1d
|
df21c2c16ecfb4a46b1d88b0474291ac67c8a05a
|
/app/migrations/0003_auto_20180708_1239.py
|
5d2a0d3ed37768c660d5b76e1dec863b6836cb8e
|
[] |
no_license
|
aditya2222/CatchUp
|
245dc4d122be7d596f8928d32a33acbbd754a4f3
|
915363faf7b59c81da070a70f9587f177a20d695
|
refs/heads/master
| 2020-03-22T14:21:17.689064
| 2018-07-08T14:08:24
| 2018-07-08T14:08:24
| 140,172,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
# Generated by Django 2.0.7 on 2018-07-08 12:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: adds Post.CurrentUser and converts Post.UserName into a
    # one-to-one link to the configured auth user model.
    dependencies = [
        ('app', '0002_auto_20180708_1219'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='CurrentUser',
            field=models.CharField(blank=True, max_length=120, null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='UserName',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"adityasingh222247@gmail.com"
] |
adityasingh222247@gmail.com
|
e372ef2a62d72abec5ba965d40ac64c52e42e1cd
|
6da9c8536378131cc28d6a9bbe2d1de7de70fbe8
|
/Hackerrank/_Contests/Project_Euler/Python/pe009.py
|
25a6197d9d0384d61e15f5053dfd1e8bf479f99c
|
[] |
no_license
|
austinsonger/CodingChallenges
|
50f61330270cb6452715e6c28ae93b4595df6aa3
|
0cdc23fb909aa06a24294d923cedd37621e56a81
|
refs/heads/master
| 2021-04-30T13:21:36.111770
| 2019-07-16T18:49:02
| 2019-07-16T18:49:02
| 121,293,018
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
'''
Special Pythagorean triplet
Problem 9
A Pythagorean triplet is a set of three natural
numbers, a < b < c, for which, a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet
for which a + b + c = 1000.
Find the product abc.
'''
__author__ = 'SUN'
def find_triplet(total=1000):
    """Return the Pythagorean triplet (a, b, c) with a < b < c and
    a + b + c == *total*, or None if no such triplet exists.

    Generalises the original hard-coded total of 1000; the search bounds
    (a < total//3, b < total//2) follow from a < b < c.
    """
    for a in range(1, total // 3):
        for b in range(a + 1, total // 2):
            c = total - a - b
            if a ** 2 + b ** 2 == c ** 2:
                return a, b, c
    return None


if __name__ == '__main__':
    # Project Euler 9: the unique triplet summing to 1000.
    a, b, c = find_triplet(1000)
    print("a =", a, ", b =", b, ", c =", c, ', a * b * c = ', a * b * c)
|
[
"austinvernsonger@protonmail.com"
] |
austinvernsonger@protonmail.com
|
be7023cfd8e20ca8aa5c7262dc094051426d8610
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/935e39a6b43731383c8ecd4f86063224edc819ebd6d95bfabab328fca05f4912/cython_runtime.py
|
37c3804fcd8cb40135e6b055396f17c83e3f5186
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
# encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\special\_ellip_harm_2.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# no classes
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
246b0476cf8c2531744051a05c4b6a1b6a94b575
|
71969e3559d93efbd560265db5264b1d93ddaaa2
|
/LSpider/urls.py
|
9206fa2394bfa78e8e9f921e98893e22ef2bdb57
|
[
"MIT"
] |
permissive
|
morole/LSpider
|
e3cc28c4afd060325d12a622c587cb45841a6e6d
|
1dcdd820a8c0520cc8b3c851a5ba7bd06fcbf2f8
|
refs/heads/master
| 2023-06-20T21:58:43.979326
| 2021-08-02T02:36:45
| 2021-08-02T02:36:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
"""LSpider URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
# Root URLconf: delegate everything to the web.index app; the Django admin
# is intentionally left disabled (commented out below).
urlpatterns = [
    # url(r'^admin/', admin.site.urls),
    path('', include('web.index.urls')),
]
|
[
"lorexxar@gmail.com"
] |
lorexxar@gmail.com
|
2de2010bec76a55f68fd7df8729f7d83ce87a3ea
|
fe8360d9284d8156cd557d3a757645c11849cdd9
|
/models/address.py
|
3c11b1443ea2136894676b06698d4e57f8b4cd02
|
[] |
no_license
|
hvanreenen/fhir-rest-server
|
5a1a5bcb9a3477d9f9d133c263f61ba202db5741
|
36ae55706aba0fdfcf084dbb24bd8c73929b3e0f
|
refs/heads/master
| 2021-01-10T23:45:06.793874
| 2016-10-20T09:57:04
| 2016-10-20T09:57:04
| 70,390,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Address) on 2016-10-07.
# 2016, SMART Health IT.
from . import element
class Address(element.Element):
    """ A postal address.
    There is a variety of postal address formats defined around the world. This
    format defines a superset that is the basis for all addresses around the
    world.
    """
    resource_name = "Address"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Generated code: each attribute defaults to None and is documented by
        # the generator via the bare string below it; actual population happens
        # in the base-class __init__ from `jsondict`.
        self.city = None #type: str
        """ Name of city, town etc..
        Type `str`. """
        self.country = None #type: str
        """ Country (can be ISO 3166 3 letter code).
        Type `str`. """
        self.district = None #type: str
        """ District name (aka county).
        Type `str`. """
        self.line = None #type: List[str]
        """ Street name, number, direction & P.O. Box etc..
        List of `str` items. """
        self.period = None #type: period.Period
        """ Time period when address was/is in use.
        Type `Period` (represented as `dict` in JSON). """
        self.postalCode = None #type: str
        """ Postal code for area.
        Type `str`. """
        self.state = None #type: str
        """ Sub-unit of country (abbreviations ok).
        Type `str`. """
        self.text = None #type: str
        """ Text representation of the address.
        Type `str`. """
        self.type = None #type: str
        """ postal | physical | both.
        Type `str`. """
        self.use = None #type: str
        """ home | work | temp | old - purpose of this address.
        Type `str`. """
        super(Address, self).__init__(jsondict=jsondict, strict=strict)
    def __str__(self):
        # Generated stub: intentionally returns the empty string.
        return ''
    def elementProperties(self):
        # Tuples: (py name, json name, type, is_list, of_many, not_optional).
        js = super(Address, self).elementProperties()
        js.extend([
            ("city", "city", str, False, None, False),
            ("country", "country", str, False, None, False),
            ("district", "district", str, False, None, False),
            ("line", "line", str, True, None, False),
            ("period", "period", period.Period, False, None, False),
            ("postalCode", "postalCode", str, False, None, False),
            ("state", "state", str, False, None, False),
            ("text", "text", str, False, None, False),
            ("type", "type", str, False, None, False),
            ("use", "use", str, False, None, False),
        ])
        return js
# Imported at module bottom, presumably to avoid a circular import with the
# period module -- TODO confirm before moving to the top of the file.
from . import period
|
[
"henk-jan.van.reenen@nlhealthcareclinics.com"
] |
henk-jan.van.reenen@nlhealthcareclinics.com
|
e4ffd83343645d489fd7f0901317a07d4bdea4b1
|
c0a25bd77d98e6087c745d5fa2862c4a715a8f59
|
/standupmeeting/settings.py
|
241a863296e0a608133996cb32846d82c37359a1
|
[] |
no_license
|
codyowl/standupmeeting
|
a84f356b611bd87956b9aa15c58a6ca63fbffebc
|
bd2a782406901f492f54c1780e1d85d07fe51c20
|
refs/heads/master
| 2021-01-21T15:18:49.137211
| 2017-06-17T17:47:40
| 2017-06-17T17:47:40
| 91,837,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
"""
Django settings for standupmeeting project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z5)=jv1ho$%@891l#l)x47*zq@4*!0$v07fk@srtz+2)rps^3j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
'home',
'dashboard',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'standupmeeting.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'standupmeeting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'standupmeeting',
'USER': 'root',
'PASSWORD': 'root',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
[
"codyowl@gmail.com"
] |
codyowl@gmail.com
|
1b6a2aa29c25d01d109682bef2c4e146e7d3ae9a
|
7b4cc3814338b600db560324e615cf5c3a02bff5
|
/test/test_inline_response20019_ranks_sum.py
|
7c843351bb5fea0cf90e0166453b3ff6628bd10a
|
[] |
no_license
|
wood-run/opendota-client
|
58ea278c94d3edad0daf695438d5ec2a3d90fe08
|
2cd7defca67c7efde4ee414e9dcd8685245cd167
|
refs/heads/master
| 2022-12-29T02:17:26.862289
| 2020-10-13T08:29:06
| 2020-10-13T08:29:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
# coding: utf-8
"""
OpenDota API
# Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. You can find data that can be used to convert hero and ability IDs and other information provided by the API from the [dotaconstants](https://github.com/odota/dotaconstants) repository. **Beginning 2018-04-22, the OpenDota API is limited to 50,000 free calls per month and 60 requests/minute** We offer a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more. # noqa: E501
OpenAPI spec version: 18.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opendota_client
from opendota_client.models.inline_response20019_ranks_sum import InlineResponse20019RanksSum # noqa: E501
from opendota_client.rest import ApiException
class TestInlineResponse20019RanksSum(unittest.TestCase):
    """Unit-test stubs for the generated InlineResponse20019RanksSum model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testInlineResponse20019RanksSum(self):
        """Test InlineResponse20019RanksSum"""
        # FIXME: construct object with mandatory attributes with example values
        # model = opendota_client.models.inline_response20019_ranks_sum.InlineResponse20019RanksSum()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly: python test_inline_response20019_ranks_sum.py
    unittest.main()
|
[
"chujiyang@gmail.com"
] |
chujiyang@gmail.com
|
75a51dcedafba4f54f170bc433e959f80f46a919
|
61e98b0302a43ab685be4c255b4ecf2979db55b6
|
/sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/functional/too_many_nested_blocks.py
|
47dbf441bd71b32547d4d652a501a6d3189ff396
|
[
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"Apache-2.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] |
permissive
|
dzenyu/kafka
|
5631c05a6de6e288baeb8955bdddf2ff60ec2a0e
|
d69a24bce8d108f43376271f89ecc3b81c7b6622
|
refs/heads/master
| 2021-07-16T12:31:09.623509
| 2021-06-28T18:22:16
| 2021-06-28T18:22:16
| 198,724,535
| 0
| 0
|
Apache-2.0
| 2019-07-24T23:51:47
| 2019-07-24T23:51:46
| null |
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
"""Checks the maximum block level is smaller than 6 in function definitions"""
#pylint: disable=using-constant-test, missing-docstring, too-many-return-statements
def my_function():  # pylint fixture: first block nests past the max-nested-blocks limit
    if 1: # [too-many-nested-blocks]
        for i in range(10):
            if i == 2:
                while True:
                    try:
                        if True:  # 6th nested block — this one tips it over
                            i += 1
                    except IOError:
                        pass

    if 1:  # only 5 levels here, so no message is expected
        for i in range(10):
            if i == 2:
                while True:
                    try:
                        i += 1
                    except IOError:
                        pass

    def nested_func():  # nesting is counted per function, so this stays legal
        if True:
            for i in range(10):
                while True:
                    if True:
                        if True:
                            yield i

    nested_func()
def more_complex_function():  # mixed if/elif/else branches, each under the limit
    attr1 = attr2 = attr3 = [1, 2, 3]
    if attr1:
        for i in attr1:
            if attr2:
                return i
            else:
                return 'duh'
    elif attr2:
        for i in attr2:
            if attr2:
                return i
            else:
                return 'duh'
    else:
        for i in range(15):
            if attr3:
                return i
            else:
                return 'doh'
    return None
def elif_function():  # flat elif chain: must NOT trigger the checker
    arg = None
    if arg == 1:
        return 1
    elif arg == 2:
        return 2
    elif arg == 3:
        return 3
    elif arg == 4:
        return 4
    elif arg == 5:
        return 5
    elif arg == 6:
        return 6
    elif arg == 7:
        return 7
def else_if_function():  # same chain written as nested else/if: DOES trigger
    arg = None
    if arg == 1: # [too-many-nested-blocks]
        return 1
    else:
        if arg == 2:
            return 2
        else:
            if arg == 3:
                return 3
            else:
                if arg == 4:
                    return 4
                else:
                    if arg == 5:
                        return 5
                    else:
                        if arg == 6:
                            return 6
                        else:
                            if arg == 7:
                                return 7
|
[
"alex.barreto@databricks.com"
] |
alex.barreto@databricks.com
|
921edfd522099ada4d11a5a777e54f9d2dca360b
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/StudentProblem/10.21.11.16/3/1569573269.py
|
7413ac80608e26f988f405d4836f82d6a23f8641
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
import functools
import typing
import string
import random
import pytest
def leap(j: int) -> bool:
    """Return True if year *j* is a leap year under Gregorian rules.

    A year is a leap year when it is divisible by 4, unless it is a
    century year not divisible by 400.
    """
    return j % 4 == 0 and (j % 100 != 0 or j % 400 == 0)
######################################################################
## Solution part 2 (tests): print a few known leap/non-leap years.
print(leap(2000))
print(leap(1660))
print(leap(1783))
print(leap(1800))
######################################################################
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
3c5b287ba292013072af0952810ed48c30cfb9e9
|
95341c85a8a116dba0d77644360ccfb346ceeb80
|
/src/api-engine/api/routes/node/serializers.py
|
9d954df567d1319bff4f28d77173fa89c21c0968
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
kuochunchang/cello
|
109204905a6be17c47b6aa3268ee4bbfeadce43a
|
1f778cea3a2021aabadd48e41cdd69ed1f8e979c
|
refs/heads/master
| 2020-06-03T05:42:43.108481
| 2019-05-28T13:45:05
| 2019-05-28T13:45:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,661
|
py
|
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
from rest_framework import serializers
from api.common.enums import (
Operation,
NetworkType,
FabricNodeType,
FabricVersions,
HostType,
)
from api.common.serializers import PageQuerySerializer
from api.models import Node
LOG = logging.getLogger(__name__)
class NodeQuery(PageQuerySerializer, serializers.ModelSerializer):
    """Query parameters accepted when listing nodes (paged)."""

    agent_id = serializers.UUIDField(
        help_text="Agent ID, only operator can use this field",
        required=False,
        allow_null=True,
    )

    class Meta:
        model = Node
        fields = (
            "page",
            "per_page",
            "type",
            "name",
            "network_type",
            "network_version",
            "agent_id",
        )
        # "type" is optional as a filter even if required on the model.
        extra_kwargs = {"type": {"required": False}}
class NodeIDSerializer(serializers.Serializer):
    """Minimal serializer exposing only a node's UUID."""

    id = serializers.UUIDField(help_text="ID of node")
class NodeInListSerializer(NodeIDSerializer, serializers.ModelSerializer):
    """Representation of one node inside a list response."""

    agent_id = serializers.UUIDField(
        help_text="Agent ID", required=False, allow_null=True
    )
    network_id = serializers.UUIDField(
        help_text="Network ID", required=False, allow_null=True
    )

    class Meta:
        model = Node
        fields = (
            "id",
            "type",
            "name",
            "network_type",
            "network_version",
            "created_at",
            "agent_id",
            "network_id",
        )
        # "id" and "created_at" are made writable (read_only=False) —
        # presumably so precomputed query results can round-trip through
        # this serializer; confirm against the view code.
        extra_kwargs = {
            "id": {"required": True, "read_only": False},
            "created_at": {"required": True, "read_only": False},
        }
class NodeListSerializer(serializers.Serializer):
    """Paged list payload: the node items plus a total count."""

    data = NodeInListSerializer(many=True, help_text="Nodes list")
    total = serializers.IntegerField(
        help_text="Total number of node", min_value=0
    )
class NodeCreateBody(serializers.ModelSerializer):
    """Request body used to create a node."""

    agent_type = serializers.ChoiceField(
        help_text="Agent type",
        choices=HostType.to_choices(True),
        required=False,
    )

    class Meta:
        model = Node
        fields = (
            "network_type",
            "network_version",
            "type",
            "agent_type",
            "agent",
        )
        extra_kwargs = {
            "network_type": {"required": True},
            "network_version": {"required": True},
            "type": {"required": True},
        }

    def validate(self, attrs):
        """Cross-field validation for node creation.

        Checks that for fabric networks the version and node type are
        valid, that at least one of agent_type/agent is supplied, and
        that an explicit agent matches the requested agent type.

        :raises serializers.ValidationError: on any inconsistency.
        """
        network_type = attrs.get("network_type")
        node_type = attrs.get("type")
        network_version = attrs.get("network_version")
        agent_type = attrs.get("agent_type")
        agent = attrs.get("agent")
        # Fabric-specific checks: version and node type must come from
        # the known enumerations.
        if network_type == NetworkType.Fabric.name.lower():
            if network_version not in FabricVersions.values():
                raise serializers.ValidationError("Not valid fabric version")
            if node_type not in FabricNodeType.names():
                raise serializers.ValidationError(
                    "Not valid node type for %s" % network_type
                )
        # Either an agent type (auto-pick) or a concrete agent is required.
        if agent_type is None and agent is None:
            raise serializers.ValidationError("Please set agent_type or agent")
        # When both are given they must agree.
        if agent_type and agent:
            if agent_type != agent.type:
                raise serializers.ValidationError(
                    "agent type not equal to agent"
                )

        return attrs
class NodeOperationSerializer(serializers.Serializer):
    """Body for the node operation endpoint: which action to perform."""

    action = serializers.ChoiceField(
        help_text=Operation.get_info("Operation for node:", list_str=True),
        choices=Operation.to_choices(True),
    )
|
[
"hightall@me.com"
] |
hightall@me.com
|
5e93c1c35118d3f32a43a70d453bab1653d00a3c
|
1e9c4294652b0f4699d85516afd54fb5697b4800
|
/python_exam/0803/mnist_cnn02.py
|
13b66b9af78378cf5592a9f8e0ee4e3c7dc36b17
|
[] |
no_license
|
mgh3326/GyeonggiBigDataSpecialist
|
89c9fbf01036b35efca509ed3f74b9784e44ed19
|
29192a66df0913c6d9b525436772c8fd51a013ac
|
refs/heads/master
| 2023-04-06T07:09:09.057634
| 2019-06-20T23:35:33
| 2019-06-20T23:35:33
| 138,550,772
| 3
| 2
| null | 2023-03-24T22:43:06
| 2018-06-25T06:10:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,430
|
py
|
# -*- coding: utf-8 -*-
"""
ml_day4 (2018.08.02)

MNIST classification with TensorFlow 1.x: a single-layer softmax
baseline, followed below by a small convolutional network.
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)

import tensorflow as tf
sess = tf.InteractiveSession()

# Inputs: flattened 28x28 images and one-hot digit labels.
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

# Single-layer softmax regression (weights initialized to zero).
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.nn.softmax(tf.matmul(x,W) + b)

# Cross-entropy loss, plain SGD, 1000 mini-batches of 50.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
for i in range(1000):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

# Report baseline accuracy on the test set.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

#### CNN
def weight_variable(shape):
    """Return a weight Variable of *shape*, truncated-normal init (std 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Return a bias Variable of *shape*, initialized to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (output keeps H, W)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 (halves spatial dimensions)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
###########################################
# Convolutional network. Data and session are re-created from scratch;
# the helper functions above are expanded inline as direct tf calls
# (the commented lines show the helper-based equivalents).
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)

import tensorflow as tf
sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

# Reshape flat 784-pixel rows back into 28x28 single-channel images.
x_image = tf.reshape(x, [-1,28,28,1])

## First convolutional layer: 5x5 kernels, 1 input channel -> 32 maps.
# W_conv1 = weight_variable([5, 5, 1, 32])
# b_conv1 = bias_variable([32])
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))

# h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# h_pool1 = max_pool_2x2(h_conv1)
conv2d01 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME')
h_conv1 = tf.nn.relu(conv2d01 + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],
                         strides=[1, 2, 2, 1], padding='SAME')

## Second convolutional layer: 5x5 kernels, 32 -> 64 feature maps.
# W_conv2 = weight_variable([5, 5, 32, 64])
# b_conv2 = bias_variable([64])
W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))

# h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# h_pool2 = max_pool_2x2(h_conv2)
conv2d02 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME')
h_conv2 = tf.nn.relu(conv2d02 + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],
                         strides=[1, 2, 2, 1], padding='SAME')

## Fully connected layer: two 2x2 poolings leave 7x7x64 features -> 1024.
# W_fc1 = weight_variable([7 * 7 * 64, 1024])
# b_fc1 = bias_variable([1024])
W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

## Dropout (keep probability is fed at run time).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

## Final softmax readout layer: 1024 -> 10 classes.
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_variable([10])
W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))

y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

## Model training and evaluation: Adam, 20000 steps, dropout 0.5 while
## training and 1.0 when evaluating.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print("test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
|
[
"mgh3326@naver.com"
] |
mgh3326@naver.com
|
26accc85bb295eeec34334972d717689820a06f2
|
1c822c0d49d7b67b0896c066958148a7b0731924
|
/Basic_Concepts_of_String_Manipulation/First_day!.py
|
d6cff37297cfcb07407b916cacdfdf68deaf9adc
|
[
"MIT"
] |
permissive
|
RKiddle/python_reg_expressions
|
7e13a16475476c88543fde6dc55b53ec2fccbe37
|
9e89c1c59677ffa19a4c64a37e92bbea33fad88e
|
refs/heads/master
| 2020-06-23T00:34:07.027628
| 2019-10-27T14:51:32
| 2019-10-27T14:51:32
| 198,446,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# Find characters in movie variable
# NOTE(review): `movie` is not defined in this snippet — it is presumably
# supplied by the exercise environment; confirm before running standalone.
length_string = len(movie)

# Convert to string
to_string = str(length_string)

# Predefined variable
statement = "Number of characters in this review:"

# Concatenate strings and print result
print(statement + " " + to_string)
|
[
"noreply@github.com"
] |
RKiddle.noreply@github.com
|
d3f0507bedcb7480314209d9473afa6749e406ff
|
e1f8bb28b022720445debea589c9cf091103a303
|
/doc/sphinxext/mock_gui_toolkits.py
|
097a3409b16793df0a2333fa9b2e06ab2289e15a
|
[] |
no_license
|
demotu/matplotlib
|
e5a4e6c7047373b3ead918c40c97f93eb09c562d
|
1662e05278ecaea064b9149c4fcb15df9f337862
|
refs/heads/master
| 2021-01-22T00:06:39.310427
| 2018-06-12T20:38:12
| 2018-06-12T20:38:12
| 24,751,842
| 1
| 0
| null | 2018-06-12T20:38:13
| 2014-10-03T08:38:36
|
Python
|
UTF-8
|
Python
| false
| false
| 6,886
|
py
|
import sys
from unittest.mock import MagicMock
class MyCairoCffi(MagicMock):
    # Stand-in for the optional ``cairocffi`` dependency during doc builds.
    pass
class MyPyQt4(MagicMock):
    """Mock PyQt4 module exposing an empty placeholder for every public
    QtGui class, so imports like ``from PyQt4.QtGui import QWidget`` work."""

    class QtGui(object):
        # PyQt4.QtGui public classes.
        # Generated with
        # textwrap.fill([name for name in dir(PyQt4.QtGui)
        #                if isinstance(getattr(PyQt4.QtGui, name), type)])
        _QtGui_public_classes = """\
Display QAbstractButton QAbstractGraphicsShapeItem
QAbstractItemDelegate QAbstractItemView QAbstractPrintDialog
QAbstractProxyModel QAbstractScrollArea QAbstractSlider
QAbstractSpinBox QAbstractTextDocumentLayout QAction QActionEvent
QActionGroup QApplication QBitmap QBoxLayout QBrush QButtonGroup
QCalendarWidget QCheckBox QClipboard QCloseEvent QColor QColorDialog
QColumnView QComboBox QCommandLinkButton QCommonStyle QCompleter
QConicalGradient QContextMenuEvent QCursor QDataWidgetMapper QDateEdit
QDateTimeEdit QDesktopServices QDesktopWidget QDial QDialog
QDialogButtonBox QDirModel QDockWidget QDoubleSpinBox QDoubleValidator
QDrag QDragEnterEvent QDragLeaveEvent QDragMoveEvent QDropEvent
QErrorMessage QFileDialog QFileIconProvider QFileOpenEvent
QFileSystemModel QFocusEvent QFocusFrame QFont QFontComboBox
QFontDatabase QFontDialog QFontInfo QFontMetrics QFontMetricsF
QFormLayout QFrame QGesture QGestureEvent QGestureRecognizer QGlyphRun
QGradient QGraphicsAnchor QGraphicsAnchorLayout QGraphicsBlurEffect
QGraphicsColorizeEffect QGraphicsDropShadowEffect QGraphicsEffect
QGraphicsEllipseItem QGraphicsGridLayout QGraphicsItem
QGraphicsItemAnimation QGraphicsItemGroup QGraphicsLayout
QGraphicsLayoutItem QGraphicsLineItem QGraphicsLinearLayout
QGraphicsObject QGraphicsOpacityEffect QGraphicsPathItem
QGraphicsPixmapItem QGraphicsPolygonItem QGraphicsProxyWidget
QGraphicsRectItem QGraphicsRotation QGraphicsScale QGraphicsScene
QGraphicsSceneContextMenuEvent QGraphicsSceneDragDropEvent
QGraphicsSceneEvent QGraphicsSceneHelpEvent QGraphicsSceneHoverEvent
QGraphicsSceneMouseEvent QGraphicsSceneMoveEvent
QGraphicsSceneResizeEvent QGraphicsSceneWheelEvent
QGraphicsSimpleTextItem QGraphicsTextItem QGraphicsTransform
QGraphicsView QGraphicsWidget QGridLayout QGroupBox QHBoxLayout
QHeaderView QHelpEvent QHideEvent QHoverEvent QIcon QIconDragEvent
QIconEngine QIconEngineV2 QIdentityProxyModel QImage QImageIOHandler
QImageReader QImageWriter QInputContext QInputContextFactory
QInputDialog QInputEvent QInputMethodEvent QIntValidator QItemDelegate
QItemEditorCreatorBase QItemEditorFactory QItemSelection
QItemSelectionModel QItemSelectionRange QKeyEvent QKeyEventTransition
QKeySequence QLCDNumber QLabel QLayout QLayoutItem QLineEdit
QLinearGradient QListView QListWidget QListWidgetItem QMainWindow
QMatrix QMatrix2x2 QMatrix2x3 QMatrix2x4 QMatrix3x2 QMatrix3x3
QMatrix3x4 QMatrix4x2 QMatrix4x3 QMatrix4x4 QMdiArea QMdiSubWindow
QMenu QMenuBar QMessageBox QMimeSource QMouseEvent
QMouseEventTransition QMoveEvent QMovie QPageSetupDialog QPaintDevice
QPaintEngine QPaintEngineState QPaintEvent QPainter QPainterPath
QPainterPathStroker QPalette QPanGesture QPen QPicture QPictureIO
QPinchGesture QPixmap QPixmapCache QPlainTextDocumentLayout
QPlainTextEdit QPolygon QPolygonF QPrintDialog QPrintEngine
QPrintPreviewDialog QPrintPreviewWidget QPrinter QPrinterInfo
QProgressBar QProgressDialog QProxyModel QPushButton QPyTextObject
QQuaternion QRadialGradient QRadioButton QRawFont QRegExpValidator
QRegion QResizeEvent QRubberBand QScrollArea QScrollBar
QSessionManager QShortcut QShortcutEvent QShowEvent QSizeGrip
QSizePolicy QSlider QSortFilterProxyModel QSound QSpacerItem QSpinBox
QSplashScreen QSplitter QSplitterHandle QStackedLayout QStackedWidget
QStandardItem QStandardItemModel QStaticText QStatusBar
QStatusTipEvent QStringListModel QStyle QStyleFactory QStyleHintReturn
QStyleHintReturnMask QStyleHintReturnVariant QStyleOption
QStyleOptionButton QStyleOptionComboBox QStyleOptionComplex
QStyleOptionDockWidget QStyleOptionDockWidgetV2 QStyleOptionFocusRect
QStyleOptionFrame QStyleOptionFrameV2 QStyleOptionFrameV3
QStyleOptionGraphicsItem QStyleOptionGroupBox QStyleOptionHeader
QStyleOptionMenuItem QStyleOptionProgressBar QStyleOptionProgressBarV2
QStyleOptionRubberBand QStyleOptionSizeGrip QStyleOptionSlider
QStyleOptionSpinBox QStyleOptionTab QStyleOptionTabBarBase
QStyleOptionTabBarBaseV2 QStyleOptionTabV2 QStyleOptionTabV3
QStyleOptionTabWidgetFrame QStyleOptionTabWidgetFrameV2
QStyleOptionTitleBar QStyleOptionToolBar QStyleOptionToolBox
QStyleOptionToolBoxV2 QStyleOptionToolButton QStyleOptionViewItem
QStyleOptionViewItemV2 QStyleOptionViewItemV3 QStyleOptionViewItemV4
QStylePainter QStyledItemDelegate QSwipeGesture QSyntaxHighlighter
QSystemTrayIcon QTabBar QTabWidget QTableView QTableWidget
QTableWidgetItem QTableWidgetSelectionRange QTabletEvent
QTapAndHoldGesture QTapGesture QTextBlock QTextBlockFormat
QTextBlockGroup QTextBlockUserData QTextBrowser QTextCharFormat
QTextCursor QTextDocument QTextDocumentFragment QTextDocumentWriter
QTextEdit QTextFormat QTextFragment QTextFrame QTextFrameFormat
QTextImageFormat QTextInlineObject QTextItem QTextLayout QTextLength
QTextLine QTextList QTextListFormat QTextObject QTextObjectInterface
QTextOption QTextTable QTextTableCell QTextTableCellFormat
QTextTableFormat QTimeEdit QToolBar QToolBox QToolButton QToolTip
QTouchEvent QTransform QTreeView QTreeWidget QTreeWidgetItem
QTreeWidgetItemIterator QUndoCommand QUndoGroup QUndoStack QUndoView
QVBoxLayout QValidator QVector2D QVector3D QVector4D QWhatsThis
QWhatsThisClickedEvent QWheelEvent QWidget QWidgetAction QWidgetItem
QWindowStateChangeEvent QWizard QWizardPage QWorkspace
QX11EmbedContainer QX11EmbedWidget QX11Info
"""
        # Create an empty placeholder class for each listed name and install
        # it in the QtGui namespace (class-body locals() is writable here).
        for _name in _QtGui_public_classes.split():
            locals()[_name] = type(_name, (), {})
        del _name
class MySip(MagicMock):
    def getapi(*args):
        # Emulate ``sip.getapi``: report API version 1 for any queried name.
        return 1
class MyWX(MagicMock):
    # Minimal wxPython stand-ins: only the classes that get subclassed
    # need to be real (subclassable) types rather than MagicMock attributes.
    class Panel(object):
        pass

    class ToolBar(object):
        pass

    class Frame(object):
        pass
def setup(app):
    """Sphinx extension entry point: install the mock GUI toolkits.

    Registers the mock modules in ``sys.modules`` so that importing
    cairocffi / PyQt4 / sip / wx during the doc build succeeds without
    the real toolkits installed.
    """
    sys.modules.update(
        cairocffi=MyCairoCffi(),
        PyQt4=MyPyQt4(),
        sip=MySip(),
        wx=MyWX(),
    )
    # Declare this extension safe for Sphinx parallel reading/writing.
    return {'parallel_read_safe': True, 'parallel_write_safe': True}
|
[
"anntzer.lee@gmail.com"
] |
anntzer.lee@gmail.com
|
6a4675054e6b1622b80d37ae794ec9fbb98e9ef6
|
bdd2bbef297d6edd3d335c48ab89955925d331d5
|
/encyclopedia/urls.py
|
5f0ded8610846862e5b0f87a8029d45d825b1c9c
|
[] |
no_license
|
michelle2014/CS50W-Wiki
|
424569bb1e2fd7c83fa7ff2a98c51821bcfc04fb
|
0301e48db06720b0419c5939816a9be345dff9b0
|
refs/heads/master
| 2023-07-28T05:05:42.512177
| 2021-09-05T05:33:06
| 2021-09-05T05:33:06
| 327,516,261
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
from django.urls import path

from . import views

# URL routes for the encyclopedia app. The catch-all "<str:title>" entry
# route is registered last so the fixed paths above take precedence.
urlpatterns = [
    path("", views.index, name="index"),
    path("create", views.create, name="create"),
    path("edit/<str:title>", views.edit, name="edit"),
    path("search", views.search, name="search"),
    path("<str:title>", views.entry, name="entry")
]
|
[
"michelle.transbetter@gmail.com"
] |
michelle.transbetter@gmail.com
|
c7432b46e7815589e67b5f13126792906baa874b
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/optimized_659.py
|
2a149860964006fc23b9d40cde8de4ed76a7020a
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,583
|
py
|
# Generated Chimera script: places one labelled marker per COG-subunit tag
# (GFP-N / GFP-C / anchor) at precomputed 3-D coordinates, with a fixed
# per-subunit RGB color and a common marker radius of 21.9005.
import _surface
import chimera
try:
    import chimera.runCommand
except:  # NOTE(review): bare except — tolerates any import failure silently
    pass
from VolumePath import markerset as ms
try:
    # Newer VolumePath exposes Marker_Set directly ...
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    # ... older versions create marker sets through the dialog.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set

marker_sets = {}
surf_sets = {}

# One marker set per tag; the "if not in" guard makes re-execution idempotent.
if "Cog1_Anch" not in marker_sets:
    s = new_marker_set('Cog1_Anch')
    marker_sets["Cog1_Anch"] = s
s = marker_sets["Cog1_Anch"]
mark = s.place_marker((513.189, 440.035, 538.548), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
    s = new_marker_set('Cog2_GFPN')
    marker_sets["Cog2_GFPN"] = s
s = marker_sets["Cog2_GFPN"]
mark = s.place_marker((570.351, 361.611, 332.233), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
    s = new_marker_set('Cog2_GFPC')
    marker_sets["Cog2_GFPC"] = s
s = marker_sets["Cog2_GFPC"]
mark = s.place_marker((553.44, 456.734, 234.315), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
    s = new_marker_set('Cog2_Anch')
    marker_sets["Cog2_Anch"] = s
s = marker_sets["Cog2_Anch"]
mark = s.place_marker((596.433, 688.793, 431.456), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
    s = new_marker_set('Cog3_GFPN')
    marker_sets["Cog3_GFPN"] = s
s = marker_sets["Cog3_GFPN"]
mark = s.place_marker((565.199, 405.652, 345.85), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
    s = new_marker_set('Cog3_GFPC')
    marker_sets["Cog3_GFPC"] = s
s = marker_sets["Cog3_GFPC"]
mark = s.place_marker((579.294, 381.001, 348.501), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
    s = new_marker_set('Cog3_Anch')
    marker_sets["Cog3_Anch"] = s
s = marker_sets["Cog3_Anch"]
mark = s.place_marker((368.44, 406.247, 367.378), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
    s = new_marker_set('Cog4_GFPN')
    marker_sets["Cog4_GFPN"] = s
s = marker_sets["Cog4_GFPN"]
mark = s.place_marker((448.219, 577.439, 439.358), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
    s = new_marker_set('Cog4_GFPC')
    marker_sets["Cog4_GFPC"] = s
s = marker_sets["Cog4_GFPC"]
mark = s.place_marker((307.69, 553.195, 375.403), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
    s = new_marker_set('Cog4_Anch')
    marker_sets["Cog4_Anch"] = s
s = marker_sets["Cog4_Anch"]
mark = s.place_marker((783.564, 349.669, 391.617), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
    s = new_marker_set('Cog5_GFPN')
    marker_sets["Cog5_GFPN"] = s
s = marker_sets["Cog5_GFPN"]
mark = s.place_marker((574.444, 482.119, 400.257), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
    s = new_marker_set('Cog5_GFPC')
    marker_sets["Cog5_GFPC"] = s
s = marker_sets["Cog5_GFPC"]
mark = s.place_marker((585.011, 387.232, 281.884), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
    s = new_marker_set('Cog5_Anch')
    marker_sets["Cog5_Anch"] = s
s = marker_sets["Cog5_Anch"]
mark = s.place_marker((580.031, 621.288, 368.759), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
    s = new_marker_set('Cog6_GFPN')
    marker_sets["Cog6_GFPN"] = s
s = marker_sets["Cog6_GFPN"]
mark = s.place_marker((579.496, 421.971, 322.229), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
    s = new_marker_set('Cog6_GFPC')
    marker_sets["Cog6_GFPC"] = s
s = marker_sets["Cog6_GFPC"]
mark = s.place_marker((561.886, 406.09, 420.253), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
    s = new_marker_set('Cog6_Anch')
    marker_sets["Cog6_Anch"] = s
s = marker_sets["Cog6_Anch"]
mark = s.place_marker((410.629, 371.023, 343.742), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
    s = new_marker_set('Cog7_GFPN')
    marker_sets["Cog7_GFPN"] = s
s = marker_sets["Cog7_GFPN"]
mark = s.place_marker((602.083, 424.911, 407.51), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
    s = new_marker_set('Cog7_GFPC')
    marker_sets["Cog7_GFPC"] = s
s = marker_sets["Cog7_GFPC"]
mark = s.place_marker((622.903, 431.478, 254.626), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
    s = new_marker_set('Cog7_Anch')
    marker_sets["Cog7_Anch"] = s
s = marker_sets["Cog7_Anch"]
mark = s.place_marker((540.708, 579.704, 242.646), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
    s = new_marker_set('Cog8_GFPC')
    marker_sets["Cog8_GFPC"] = s
s = marker_sets["Cog8_GFPC"]
mark = s.place_marker((602.373, 438.065, 358.834), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
    s = new_marker_set('Cog8_Anch')
    marker_sets["Cog8_Anch"] = s
s = marker_sets["Cog8_Anch"]
mark = s.place_marker((660.617, 589.743, 366.602), (0.3, 0.69, 0.29), 21.9005)

# Register any surface sets with Chimera (none are created above, so this
# loop is currently a no-op).
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
27a2b8233ca588d5ce1b4954241ac87f2ee31b23
|
99e44f844d78de330391f2b17bbf2e293bf24b1b
|
/pytorch/tools/autograd/nested_dict.py
|
e1e09814199153aa94647c2246c983b2ba3ea303
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
raghavnauhria/whatmt
|
be10d57bcd6134dd5714d0c4058abd56a1b35a13
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
refs/heads/master
| 2022-12-04T05:39:24.601698
| 2019-07-22T09:43:30
| 2019-07-22T09:43:30
| 193,026,689
| 0
| 1
|
MIT
| 2022-11-28T17:50:19
| 2019-06-21T03:48:20
|
C++
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
# TODO: refactor nested_dict into common library with ATen
class nested_dict(object):
    """
    A nested dict is a dictionary with a parent. If key lookup fails,
    it recursively continues into the parent. Writes always happen to
    the top level dict.
    """
    def __init__(self, base, parent):
        # base: the top-level mapping; parent: fallback mapping (may itself
        # be another nested_dict, giving arbitrary-depth chains).
        self.base, self.parent = base, parent

    def __contains__(self, item):
        return item in self.base or item in self.parent

    def __getitem__(self, x):
        # EAFP lookup instead of ``base.get(x) is None``: a key legitimately
        # mapped to None in ``base`` must be returned, not shadowed by the
        # parent's value. Keeps __getitem__ consistent with __contains__.
        try:
            return self.base[x]
        except KeyError:
            return self.parent[x]
|
[
"rnauhria@gmail.com"
] |
rnauhria@gmail.com
|
99286b2ac35687ea7459db1976eefff58c6ac283
|
3a3c7ab7d9cadfc5610888e07dbb9d6eaaf8aa01
|
/scripts/OpenFOAM/generateBodyOBJFile.py
|
b2dfdaab64702e895cf9fb115ccd64fdb7f598dc
|
[
"MIT"
] |
permissive
|
cubayang/snake
|
7e430e8bcbf4acf99c007e5c1a646e0e6f45280c
|
f78844235f4d9b815b53a707f276dd634bce7a07
|
refs/heads/master
| 2021-01-17T20:24:27.359901
| 2016-08-18T00:34:18
| 2016-08-18T00:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
# file: generateBodyOBJFile.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# brief: Convert input coordinates file into a OBJ file.
import os
import argparse
from snake.openfoam import OBJFile
from snake import miscellaneous
def parse_command_line():
    """Parses the command-line.

    Returns the argparse namespace with: file_path, name,
    extrusion_limits, save_directory (and options, if given).
    """
    # NOTE(review): the trailing commas after print(...) are Python 2
    # "suppress newline" holdovers; under Python 3 they merely build a
    # throwaway one-element tuple.
    print('[info] parsing command-line ...'),
    # create the parser
    parser = argparse.ArgumentParser(description='Generates an .OBJ file '
                                                 'that will be readable by OpenFOAM '
                                                 'mesh generator: SnappyHexMesh',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # parser = argparse.ArgumentParser(description='Generates an .OBJ file '
    #                                              'that will be readable by OpenFOAM '
    #                                              'mesh generator: SnappyHexMesh')
    # fill the parser with arguments
    parser.add_argument('--file', dest='file_path',
                        type=str,
                        metavar=('<path>'),
                        help='path of the coordinates file to convert')
    parser.add_argument('--name', dest='name',
                        type=str,
                        metavar=('<name>'),
                        help='name of the .OBJ file generated (no extension)')
    parser.add_argument('--extrusion-limits', dest='extrusion_limits',
                        type=float, nargs=2,
                        default=[0.0, 1.0],
                        metavar=('start', 'end'),
                        help='limits of the extrusion in the 3rd direction')
    parser.add_argument('--save-directory', dest='save_directory',
                        type=str,
                        default=os.getcwd(),
                        metavar=('<directory>'),
                        help='directory where to save the .obj file')
    # parse given options file
    # The custom action reads additional CLI options from a file.
    parser.add_argument('--options',
                        type=open, action=miscellaneous.ReadOptionsFromFile,
                        metavar=('<path>'),
                        help='path of the file with options to parse')
    print('done')
    return parser.parse_args()
def main():
    """Generates an .OBJ file from a given coordinates file."""
    args = parse_command_line()
    # Body2d loads the 2D contour and extrudes it along the third
    # direction between the given limits before writing the .obj mesh.
    body = OBJFile.Body2d(args.file_path,
                          name=args.name,
                          extrusion_limits=args.extrusion_limits)
    body.write(save_directory=args.save_directory)
if __name__ == '__main__':
    # Print start/end banners around the conversion for easier log reading.
    print('\n[{}] START\n'.format(os.path.basename(__file__)))
    main()
    print('\n[{}] END\n'.format(os.path.basename(__file__)))
|
[
"mesnardo@gwu.edu"
] |
mesnardo@gwu.edu
|
51a5de5a76db69817407b3251044c8d8f122a59f
|
264f392530710b287ac54f40ea805638c6348cc3
|
/scripts/run_tabular_bayes_dice.py
|
3326a3f91fd93e0b96222614b928658af9ee75ab
|
[
"Apache-2.0"
] |
permissive
|
google-research/dice_rl
|
b26dd2231b0a664f11e0ede08d8209a4ace1cd2f
|
6551950608ad0472ddf6e8f4075f51793c9d2763
|
refs/heads/master
| 2023-08-06T21:35:15.690175
| 2023-01-30T19:26:12
| 2023-01-30T19:27:38
| 285,369,787
| 106
| 14
|
Apache-2.0
| 2023-01-30T19:27:44
| 2020-08-05T18:15:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,480
|
py
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running tabular BayesDICE.
Make sure to generate the datasets prior to running this script (see
`scripts/create_dataset.py`). The default parameters here should reproduce
the published bandit and frozenlake results. For Taxi, pass in
solve_for_state_action_ratio=False.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import tensorflow_probability as tfp
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from dice_rl.environments.env_policies import get_target_policy
import dice_rl.environments.gridworld.navigation as navigation
import dice_rl.environments.gridworld.taxi as taxi
from dice_rl.estimators import estimator as estimator_lib
from dice_rl.estimators.tabular_bayes_dice import TabularBayesDice
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
# Command-line configuration: environment/dataset selection plus BayesDICE
# training hyperparameters.
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'frozenlake', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 5, 'Number of trajectories to collect.')
flags.DEFINE_float('alpha', 0.0,
                   'How close is the behavior policy to optimal policy.')
flags.DEFINE_integer('max_trajectory_length', 100,
                     'Cutoff trajectory at this step.')
flags.DEFINE_bool('tabular_obs', True, 'Whether to use tabular observations.')
flags.DEFINE_string('load_dir', None, 'Directory to load dataset from.')
flags.DEFINE_string('save_dir', None, 'Directory to save estimation results.')
flags.DEFINE_float('gamma', 0.99, 'Discount factor.')
flags.DEFINE_integer('num_steps', 50000, 'Number of training steps.')
flags.DEFINE_integer('batch_size', 1024, 'Batch size.')
flags.DEFINE_float('zeta_learning_rate', 1e-2, 'Zeta learning rate.')
flags.DEFINE_float('nu_learning_rate', 1e-2, 'Value learning rate.')
# NOTE(review): the help text below looks copy-pasted from tabular_obs;
# the flag actually toggles solving for state-action density ratios.
flags.DEFINE_bool('solve_for_state_action_ratio', True,
                  'Whether to use tabular observations.')
flags.DEFINE_float('alpha_target', 1.0,
                   'How close is the target policy to optimal policy.')
flags.DEFINE_float('kl_regularizer', 1., 'LP regularizer of kl(q||p).')
flags.DEFINE_float('eps_std', 1., 'Epsilon std for reparametrization.')
def main(argv):
  """Loads an offline dataset and trains tabular BayesDICE on it."""
  env_name = FLAGS.env_name
  seed = FLAGS.seed
  tabular_obs = FLAGS.tabular_obs
  num_trajectory = FLAGS.num_trajectory
  max_trajectory_length = FLAGS.max_trajectory_length
  load_dir = FLAGS.load_dir
  save_dir = FLAGS.save_dir
  gamma = FLAGS.gamma
  # A proper discount in [0, 1) is required by the estimator below.
  assert 0 <= gamma < 1.
  alpha = FLAGS.alpha
  alpha_target = FLAGS.alpha_target
  num_steps = FLAGS.num_steps
  batch_size = FLAGS.batch_size  # NOTE(review): read but never used below.
  zeta_learning_rate = FLAGS.zeta_learning_rate
  nu_learning_rate = FLAGS.nu_learning_rate
  solve_for_state_action_ratio = FLAGS.solve_for_state_action_ratio
  eps_std = FLAGS.eps_std
  kl_regularizer = FLAGS.kl_regularizer
  target_policy = get_target_policy(
      load_dir, env_name, tabular_obs, alpha=alpha_target)
  # The directory name encodes the dataset-generation hyperparameters and
  # must match the name produced by scripts/create_dataset.py.
  hparam_str = ('{ENV_NAME}_tabular{TAB}_alpha{ALPHA}_seed{SEED}_'
                'numtraj{NUM_TRAJ}_maxtraj{MAX_TRAJ}').format(
                    ENV_NAME=env_name,
                    TAB=tabular_obs,
                    ALPHA=alpha,
                    SEED=seed,
                    NUM_TRAJ=num_trajectory,
                    MAX_TRAJ=max_trajectory_length)
  directory = os.path.join(load_dir, hparam_str)
  print('Loading dataset.')
  dataset = Dataset.load(directory)
  print('num loaded steps', dataset.num_steps)
  print('num loaded total steps', dataset.num_total_steps)
  print('num loaded episodes', dataset.num_episodes)
  print('num loaded total episodes', dataset.num_total_episodes)
  # Sanity check: average per-step reward of the logged (behavior) data.
  print('behavior per-step',
        estimator_lib.get_fullbatch_average(dataset, gamma=gamma))
  train_hparam_str = ('eps{EPS}_kl{KL}').format(EPS=eps_std, KL=kl_regularizer)
  if save_dir is not None:
    # Save for a specific alpha target
    target_hparam_str = hparam_str.replace(
        'alpha{}'.format(alpha), 'alpha{}_alphat{}'.format(alpha, alpha_target))
    save_dir = os.path.join(save_dir, target_hparam_str, train_hparam_str)
    summary_writer = tf.summary.create_file_writer(logdir=save_dir)
  else:
    summary_writer = tf.summary.create_noop_writer()
  estimator = TabularBayesDice(
      dataset_spec=dataset.spec,
      gamma=gamma,
      solve_for_state_action_ratio=solve_for_state_action_ratio,
      zeta_learning_rate=zeta_learning_rate,
      nu_learning_rate=nu_learning_rate,
      kl_regularizer=kl_regularizer,
      eps_std=eps_std,
  )
  estimator.prepare_dataset(dataset, target_policy)
  global_step = tf.Variable(0, dtype=tf.int64)
  tf.summary.experimental.set_step(global_step)
  with summary_writer.as_default():
    running_losses = []
    running_estimates = []
    for step in range(num_steps):
      loss = estimator.train_step()[0]
      running_losses.append(loss)
      global_step.assign_add(1)
      # Every 500 steps (and at the end) report losses and the current
      # posterior estimate of the target policy's average reward.
      if step % 500 == 0 or step == num_steps - 1:
        print('step', step, 'losses', np.mean(running_losses, 0))
        estimate = estimator.estimate_average_reward(dataset, target_policy)
        tf.debugging.check_numerics(estimate, 'NaN in estimate')
        running_estimates.append(estimate)
        tf.print('est', tf.math.reduce_mean(estimate),
                 tf.math.reduce_std(estimate))
        running_losses = []
  if save_dir is not None:
    with tf.io.gfile.GFile(os.path.join(save_dir, 'results.npy'), 'w') as f:
      np.save(f, running_estimates)
    print('saved results to %s' % save_dir)
  print('Done!')
if __name__ == '__main__':
  app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
df2805ded0f8ca965205b075e6a84753cff47e12
|
b2fb3c44c67eb61c41465996c24c094071c457cc
|
/LeetCode/print_words_vertically.py
|
3027e23f1272d85cbab87a94fe941c5d21586733
|
[] |
no_license
|
zelzhan/Challenges-and-contests
|
8edd3a2f07a0538903dc885c86e15f02783821c5
|
e7df9b37ad1130d37f3efbf0114d06b6f3b4a4f1
|
refs/heads/master
| 2022-12-28T23:16:30.807040
| 2020-10-13T10:09:22
| 2020-10-13T10:09:22
| 118,697,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
from collections import defaultdict
class Solution:
    def printVertically(self, s: str) -> List[str]:
        """Return the words of *s* read vertically, one string per column.

        Column k is the concatenation of the k-th character of every word
        (space-padded for words that are too short), with trailing spaces
        removed.
        """
        words = s.split(" ")
        width = max(len(word) for word in words)
        vertical = []
        for col in range(width):
            chars = [word[col] if col < len(word) else " " for word in words]
            vertical.append("".join(chars).rstrip(" "))
        return vertical
|
[
"noreply@github.com"
] |
zelzhan.noreply@github.com
|
dfabe356284d91b7abe48701e4cb31e026728bd1
|
e8d719fe45dfbff9cbbc4ed872832cec6cabaca6
|
/307_Range_Sum_Query_Mutable_TLE.py
|
09a96706fa016fe861dd7404e808a7fa4a7d89a3
|
[] |
no_license
|
nlfox/leetcode
|
64f4f48d7f4be6df0542e51cc7037df40bf184a3
|
d61363f99de3d591ebc8cd94f62544a31a026d55
|
refs/heads/master
| 2020-12-21T01:43:01.792899
| 2016-11-14T23:10:12
| 2016-11-14T23:10:12
| 56,680,839
| 2
| 0
| null | 2016-05-17T17:16:37
| 2016-04-20T11:19:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
class NumArray(object):
    """Range-sum queries over a mutable array via a prefix-sum table.

    update() is O(n) (rebuilds the tail of the prefix table); sumRange()
    is O(1).
    """
    def __init__(self, nums):
        """
        initialize your data structure here.
        :type nums: List[int]
        """
        self.nums = nums
        self.len = len(nums)
        # d[k] holds the sum of nums[0..k-1]; d has len+1 entries so that
        # sumRange(i, j) == d[j+1] - d[i].
        self.d = []
        last = 0
        for i in nums:
            self.d.append(last)
            last += i
        self.d.append(last)
    def update(self, i, val):
        """
        :type i: int
        :type val: int
        :rtype: None
        """
        self.nums[i] = val
        # Only prefix sums after position i change; rebuild just the tail.
        # `range` (instead of Python-2-only `xrange`) keeps this correct
        # under both Python 2 and Python 3.
        last = self.d[i]
        for j in range(i + 1, self.len + 1):
            last += self.nums[j - 1]
            self.d[j] = last
    def sumRange(self, i, j):
        """
        sum of elements nums[i..j], inclusive.
        :type i: int
        :type j: int
        :rtype: int
        """
        return self.d[j + 1] - self.d[i]
# Your NumArray object will be instantiated and called as such:
# NOTE(review): Python 2 print statements — this demo block does not run
# under Python 3.  update() returns None, so the update lines print "None".
numArray = NumArray([9, -8])
print numArray.update(0, 3)
print numArray.sumRange(1, 1)
print numArray.sumRange(0, 1)
print numArray.update(1, -3)
print numArray.sumRange(0, 1)
|
[
"nlfox@msn.cn"
] |
nlfox@msn.cn
|
e6d2396b1679238553cf86553f1d2cbe848c4b65
|
b8c4ef9ccab22717ab97ab2fb100614d962a5820
|
/src/test/python/com/skalicky/python/interviewpuzzles/test_find_all_concatenated_words_in_dictionary.py
|
31250fe2c29509af0bab7db00e4be68e00a269b3
|
[] |
no_license
|
Sandeep8447/interview_puzzles
|
1d6c8e05f106c8d5c4c412a9f304cb118fcc90f4
|
a3c1158fe70ed239f8548ace8d1443a431b644c8
|
refs/heads/master
| 2023-09-02T21:39:32.747747
| 2021-10-30T11:56:57
| 2021-10-30T11:56:57
| 422,867,683
| 0
| 0
| null | 2021-10-30T11:56:58
| 2021-10-30T11:55:17
| null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
from unittest import TestCase
from src.main.python.com.skalicky.python.interviewpuzzles.find_all_concatenated_words_in_dictionary import Solution
class TestSolution(TestCase):
    """Behavioral tests for Solution.find_all_concatenated_words_in_dictionary."""
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_words_of_same_length__then_output_is_empty(
            self):
        # Equal-length words cannot be concatenations of each other.
        self.assertSetEqual(set(), Solution.find_all_concatenated_words_in_dictionary({'cat', 'dog', 'eat'}))
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_multiple_concatenated_words_of_2_other_words__then_these_words_are_in_output(
            self):
        self.assertSetEqual({'techlead', 'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
            {'tech', 'lead', 'techlead', 'cat', 'cats', 'dog', 'catsdog'}))
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_concatenated_word_of_3_other_words__then_this_word_is_in_output(
            self):
        # 'catsdog' = 'cat' + 's' + 'dog' (three parts, not just two).
        self.assertSetEqual({'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
            {'cat', 's', 'dog', 'catsdog'}))
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_word_concatenated_by_multiple_ways__then_this_word_is_in_output(
            self):
        # 'catsdog' splits as 'cats'+'dog' or 'cat'+'s'+'dog'; it must be
        # reported once, and 'cats' (= 'cat'+'s') is itself concatenated.
        self.assertSetEqual({'cats', 'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
            {'cat', 'cats', 's', 'dog', 'catsdog'}))
|
[
"skalicky.tomas@gmail.com"
] |
skalicky.tomas@gmail.com
|
6f6e441bbde59763d7fe65221a6f86714e769020
|
2082cd57fa2325a508af5f10bd00e8eca059bc09
|
/src/geometry/manifolds/translation_algebra.py
|
67b1958cc3594e112a9cdeb471c3976addf27f7f
|
[] |
no_license
|
efernandez/geometry
|
98e5894a83acaa32eefb2187374d4c34801a5600
|
ec7fa1308224f3d156c54495bc4b05ce47a41004
|
refs/heads/master
| 2021-01-18T16:51:13.964917
| 2014-11-04T14:03:59
| 2014-11-04T14:03:59
| 36,390,891
| 0
| 1
| null | 2015-05-27T19:35:14
| 2015-05-27T19:35:14
| null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
from . import MatrixLieAlgebra
from .. import extract_pieces, combine_pieces
from contracts import contract
import numpy as np
class tran(MatrixLieAlgebra):
    '''
        Lie algebra for translation: elements are (n+1)x(n+1) homogeneous
        matrices whose only free entries are the n translation components.
    '''
    @contract(n="1|2|3")
    def __init__(self, n):
        # Ambient matrix size is n+1 (homogeneous form); the algebra has
        # n free dimensions (the translation vector).
        MatrixLieAlgebra.__init__(self, n + 1, dimension=n)
    def norm(self, X):
        # Magnitude of an algebra element is the Euclidean norm of its
        # translation part; the rotational block W is ignored.
        # NOTE: `zero` is deliberately bound twice (both residual pieces).
        W, v, zero, zero = extract_pieces(X)  # @UnusedVariable
        return np.linalg.norm(v)
    def project(self, X):
        # Projection onto the algebra keeps only the translation column;
        # the W block and scalar are zeroed out.
        W, v, zero, zero = extract_pieces(X)  # @UnusedVariable
        return combine_pieces(W * 0, v, v * 0, 0)
    def __repr__(self):
        # self.n is the homogeneous matrix size, so the group dimension
        # shown is n - 1 (presumably set by MatrixLieAlgebra.__init__ —
        # TODO confirm against the base class).
        return 'tr%s' % (self.n - 1)
    def interesting_points(self):
        # Only the identity (zero translation) is singled out.
        points = []
        points.append(self.zero())
        return points
    @contract(a='belongs')
    def vector_from_algebra(self, a):
        # Extract the translation vector; normalize a 0-d array to 1-d so
        # the size assertion below holds for n == 2 (single component).
        W, v, zero, zero = extract_pieces(a)  # @UnusedVariable
        if v.shape == ():
            v = v.reshape(1)
        assert v.size == self.n - 1
        return v
    @contract(returns='belongs', v='array[K]')
    def algebra_from_vector(self, v):
        # Inverse of vector_from_algebra: embed v as the translation
        # column of an otherwise-zero homogeneous matrix.
        assert v.size == self.n - 1
        return combine_pieces(np.zeros((self.n - 1, self.n - 1)), v, v * 0, 0)
|
[
"andrea@cds.caltech.edu"
] |
andrea@cds.caltech.edu
|
006d5216c55a276b30c61478f4da189fc81ca037
|
7bf287e00b35f50afa70e8585f41d1db543d98f2
|
/Medium/FindLeavesOfBinaryTree.py
|
c914ab48ab422fd2ee73e77c175d3f5a5d0fe9c8
|
[] |
no_license
|
mangalagb/Leetcode
|
eac611453de07ffc635265e98c39b46255cf76c6
|
fcf6c3d5d60d13706950247d8a2327adc5faf17e
|
refs/heads/master
| 2022-05-14T23:16:28.007044
| 2022-04-29T19:33:24
| 2022-04-29T19:33:24
| 158,616,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,011
|
py
|
# #Given the root of a binary tree, collect a tree's nodes as if you were doing this:
#
# Collect all the leaf nodes.
# Remove all the leaf nodes.
# Repeat until the tree is empty.
# Definition for a binary tree node.
class TreeNode(object):
    """A binary tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution(object):
    def findLeaves(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        Repeatedly strips the current layer of leaves until the tree is
        empty; each stripped layer becomes one output list.
        NOTE: this destroys the input tree (children are detached).
        """
        ans = []
        while root is not None:
            leaves = []
            self.remove_leaves(root, None, None, leaves)
            #it is a root node
            # remove_leaves never detaches the root itself, so an empty
            # `leaves` means only the root remains — emit it and stop.
            if len(leaves) == 0:
                leaves.append(root.val)
                root = None
            ans.append(leaves)
        return ans
    def remove_leaves(self, node, parent, isLeftChild, leaves):
        # Detach `node` from `parent` if it is a leaf, recording its value
        # in `leaves`; otherwise recurse into the children.  isLeftChild
        # says which parent pointer to clear (None means node is the root,
        # which is handled by the caller instead).
        if not node:
            return
        #If node is a leaf
        if not node.left and not node.right:
            if isLeftChild is None:
                return
            if isLeftChild:
                parent.left = None
            else:
                parent.right = None
            leaves.append(node.val)
        if node.left:
            self.remove_leaves(node.left, node, True, leaves)
        if node.right:
            self.remove_leaves(node.right, node, False, leaves)
    def make_tree(self):
        # Fixture:      1
        #              / \
        #             2   3
        #            / \
        #           4   5
        root = TreeNode(1)
        node2 = TreeNode(2)
        node3 = TreeNode(3)
        node4 = TreeNode(4)
        node5 = TreeNode(5)
        root.left = node2
        root.right = node3
        node2.left = node4
        node2.right = node5
        return root
    def make_tree1(self):
        # Fixture: a single-node tree.
        root = TreeNode(1)
        return root
# Ad-hoc demo; expected outputs are shown in the trailing comments.
my_sol = Solution()
root = my_sol.make_tree()
print(my_sol.findLeaves(root)) #[[4,5,3],[2],[1]]
root = my_sol.make_tree1()
print(my_sol.findLeaves(root)) #[[1]]
|
[
"mangalagb@gmail.com"
] |
mangalagb@gmail.com
|
d2894ba6e632b91ec3412e5b44336eb0e03154d2
|
fec261e7717769078dd0044b3ac19e509ff65afa
|
/python/sort/selection_sort.py
|
bb4e47579b4fa5b22a4eeda18c29a39cc587698f
|
[] |
no_license
|
ne7ermore/playground
|
af94854c6da01b43b1e10ea891129a749ea9d807
|
072406e562e0d33c650ba01bf9ebfbe593f55d5c
|
refs/heads/master
| 2021-06-02T13:19:34.110406
| 2020-05-28T10:49:12
| 2020-05-28T10:49:12
| 108,945,081
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
def selection_sort(arr):
    """Sort *arr* in place by repeated minimum selection; return it.

    For each position, locate the index of the smallest remaining
    element and swap it into place.  O(n^2) comparisons, O(1) extra
    space; the first occurrence of equal minima is chosen.
    """
    size = len(arr)
    for pos in range(size):
        smallest = pos
        for candidate in range(pos + 1, size):
            if arr[candidate] < arr[smallest]:
                smallest = candidate
        arr[pos], arr[smallest] = arr[smallest], arr[pos]
    return arr
if __name__ == "__main__":
    sample = [10, 20, 5, 9, 3, 8, 12, 14, 90, 0, 60, 40, 23, 35, 95, 18]
    assert len(selection_sort(sample)) == len(sample)
    assert selection_sort(sample) == [0, 3, 5, 8, 9, 10, 12,
                                      14, 18, 20, 23, 35, 40, 60, 90, 95]
|
[
"422618856@qq.com"
] |
422618856@qq.com
|
bc871b75ba4a48cd1baa270703581d5d3cbdfaaf
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02646/s014246720.py
|
f54f26a26b36440b0006e25566aa3480de114870
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# Chase problem: chaser at A with speed V, runner at B with speed W;
# decide whether the chaser catches the runner within T seconds.
import sys
A,V = map(int, input().split())
B,W = map(int, input().split())
T = int(input())
# This looks solvable in O(1).
# If speeds are equal, or the runner is faster, catching up is impossible.
if V <= W:
    print("NO")
    sys.exit()
# Case split when the chaser is strictly faster: the gap closes at rate
# V - W, so compare the distance covered in T seconds to the initial gap.
distance_AB = abs(A-B)
speed_AB = abs(V-W)
if speed_AB * T >= distance_AB:
    print("YES")
else:
    print("NO")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b35629b13b4288e9f2d3b4ff7f9c2a7af9103d2b
|
2d055595705582784624c6bde5abf1b3854b34a9
|
/tweets/mixins.py
|
e35d46e2850f0171dca2e7dc62983077149c37d8
|
[] |
no_license
|
Anubhav722/twitter_clone
|
fc36568cb6b32ce1942923ffcf55ebcce714e53f
|
f76190b8f5f3ac8dfad87d35b2650c5285e5082b
|
refs/heads/master
| 2021-05-01T08:25:42.857828
| 2017-02-07T14:32:09
| 2017-02-07T14:32:09
| 79,710,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
from django import forms
from django.forms.utils import ErrorList
class FormUserNeededMixin(object):
    """View mixin: attach the logged-in user to the form's instance before
    saving, or reject the form with a non-field error when anonymous."""
    def form_valid(self, form):
        # NOTE(review): is_authenticated is called as a method here; on
        # Django >= 1.10 it is a property — confirm the project's version.
        if self.request.user.is_authenticated():
            form.instance.user = self.request.user
            return super(FormUserNeededMixin, self).form_valid(form)
        else:
            form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList(['User must be logged in to continue.'])
            return self.form_invalid(form)
class UserOwnerMixin(object):
    """View mixin: only the object's owning user may submit changes;
    everyone else gets a non-field error and the invalid-form path."""
    def form_valid(self, form):
        if form.instance.user == self.request.user:
            return super(UserOwnerMixin, self).form_valid(form)
        else:
            form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList(['This user is not allowed to change the data'])
            return self.form_invalid(form)
|
[
"anubhavs286@gmail.com"
] |
anubhavs286@gmail.com
|
a33b45b686b42f02891fe745b169f339692f91d2
|
acdd393c25b32779a637a05b5a5574aaecdda9d6
|
/pelican-plugins/more_categories/test_more_categories.py
|
41dc4a2241313d4debe74eb3c9a78d38b9c38ad9
|
[
"AGPL-3.0-only",
"MIT"
] |
permissive
|
JN-Blog/jn-blog.com
|
51f1b8f9011138b3ebf62b93c2ecaba9e2d514bf
|
669bf9a9c6813f2b7980792fb137f6718077aea1
|
refs/heads/master
| 2020-04-02T10:07:31.569949
| 2018-12-30T14:30:49
| 2018-12-30T14:30:49
| 154,325,262
| 0
| 0
|
MIT
| 2018-12-30T14:30:50
| 2018-10-23T12:36:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,587
|
py
|
"""Unit tests for the more_categories plugin"""
import os
import unittest
from . import more_categories
from pelican.generators import ArticlesGenerator
from pelican.tests.support import get_context, get_settings
class TestArticlesGenerator(unittest.TestCase):
    """Integration tests for the more_categories Pelican plugin, run
    against the articles under test_data/."""
    @classmethod
    def setUpClass(cls):
        # Register the plugin's signal handlers once for the whole class.
        more_categories.register()
        settings = get_settings()
        settings['DEFAULT_CATEGORY'] = 'default'
        settings['CACHE_CONTENT'] = False
        # NOTE(review): PLUGINS is conventionally a *list* of modules;
        # assigning the module directly looks suspect — confirm against
        # Pelican's settings handling.
        settings['PLUGINS'] = more_categories
        context = get_context(settings)
        base_path = os.path.dirname(os.path.abspath(__file__))
        test_data_path = os.path.join(base_path, 'test_data')
        cls.generator = ArticlesGenerator(
            context=context, settings=settings,
            path=test_data_path, theme=settings['THEME'], output_path=None)
        cls.generator.generate_context()
    def test_generate_categories(self):
        """Test whether multiple categories are generated correctly,
        including ancestor categories"""
        cats_generated = [cat.name for cat, _ in self.generator.categories]
        cats_expected = ['default', 'foo', 'foo/bar', 'foo/baz',]
        self.assertEqual(sorted(cats_generated), sorted(cats_expected))
    def test_assign_articles_to_categories(self):
        """Test whether articles are correctly assigned to categories,
        including whether articles are not assigned multiple times to the same
        ancestor category"""
        for cat, articles in self.generator.categories:
            self.assertEqual(len(articles), 1)
|
[
"julien.nuellas@gmail.com"
] |
julien.nuellas@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.